MPX for amd64
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
28e7fd62 3 Copyright (C) 2002-2013 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
aa5ca48f 20#include <stddef.h>
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e
L
28#include "i386-xstate.h"
29#include "elf/common.h"
d0722149
DE
30
31#include "gdb_proc_service.h"
58b4daa5 32#include "agent.h"
3aee8918 33#include "tdesc.h"
c144c7a0 34#include "tracepoint.h"
f699aaba 35#include "ax.h"
d0722149 36
3aee8918 37#ifdef __x86_64__
90884b2b
L
38/* Defined in auto-generated file amd64-linux.c. */
39void init_registers_amd64_linux (void);
3aee8918
PA
40extern const struct target_desc *tdesc_amd64_linux;
41
1570b33e
L
42/* Defined in auto-generated file amd64-avx-linux.c. */
43void init_registers_amd64_avx_linux (void);
3aee8918
PA
44extern const struct target_desc *tdesc_amd64_avx_linux;
45
4d47af5c
L
46/* Defined in auto-generated file x32-linux.c. */
47void init_registers_x32_linux (void);
3aee8918
PA
48extern const struct target_desc *tdesc_x32_linux;
49
4d47af5c
L
50/* Defined in auto-generated file x32-avx-linux.c. */
51void init_registers_x32_avx_linux (void);
3aee8918
PA
52extern const struct target_desc *tdesc_x32_avx_linux;
53#endif
54
55/* Defined in auto-generated file i386-linux.c. */
56void init_registers_i386_linux (void);
57extern const struct target_desc *tdesc_i386_linux;
58
59/* Defined in auto-generated file i386-mmx-linux.c. */
60void init_registers_i386_mmx_linux (void);
61extern const struct target_desc *tdesc_i386_mmx_linux;
62
63/* Defined in auto-generated file i386-avx-linux.c. */
64void init_registers_i386_avx_linux (void);
65extern const struct target_desc *tdesc_i386_avx_linux;
66
67#ifdef __x86_64__
68static struct target_desc *tdesc_amd64_linux_no_xml;
69#endif
70static struct target_desc *tdesc_i386_linux_no_xml;
71
1570b33e 72
fa593d66 73static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 74static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 75
1570b33e
L
76/* Backward compatibility for gdb without XML support. */
77
78static const char *xmltarget_i386_linux_no_xml = "@<target>\
79<architecture>i386</architecture>\
80<osabi>GNU/Linux</osabi>\
81</target>";
f6d1620c
L
82
83#ifdef __x86_64__
1570b33e
L
84static const char *xmltarget_amd64_linux_no_xml = "@<target>\
85<architecture>i386:x86-64</architecture>\
86<osabi>GNU/Linux</osabi>\
87</target>";
f6d1620c 88#endif
d0722149
DE
89
90#include <sys/reg.h>
91#include <sys/procfs.h>
92#include <sys/ptrace.h>
1570b33e
L
93#include <sys/uio.h>
94
95#ifndef PTRACE_GETREGSET
96#define PTRACE_GETREGSET 0x4204
97#endif
98
99#ifndef PTRACE_SETREGSET
100#define PTRACE_SETREGSET 0x4205
101#endif
102
d0722149
DE
103
104#ifndef PTRACE_GET_THREAD_AREA
105#define PTRACE_GET_THREAD_AREA 25
106#endif
107
108/* This definition comes from prctl.h, but some kernels may not have it. */
109#ifndef PTRACE_ARCH_PRCTL
110#define PTRACE_ARCH_PRCTL 30
111#endif
112
113/* The following definitions come from prctl.h, but may be absent
114 for certain configurations. */
115#ifndef ARCH_GET_FS
116#define ARCH_SET_GS 0x1001
117#define ARCH_SET_FS 0x1002
118#define ARCH_GET_FS 0x1003
119#define ARCH_GET_GS 0x1004
120#endif
121
aa5ca48f
DE
122/* Per-process arch-specific data we want to keep. */
123
124struct arch_process_info
125{
126 struct i386_debug_reg_state debug_reg_state;
127};
128
129/* Per-thread arch-specific data we want to keep. */
130
131struct arch_lwp_info
132{
133 /* Non-zero if our copy differs from what's recorded in the thread. */
134 int debug_registers_changed;
135};
136
d0722149
DE
137#ifdef __x86_64__
138
139/* Mapping between the general-purpose registers in `struct user'
140 format and GDB's register array layout.
141 Note that the transfer layout uses 64-bit regs. */
142static /*const*/ int i386_regmap[] =
143{
144 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
145 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
146 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
147 DS * 8, ES * 8, FS * 8, GS * 8
148};
149
150#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
151
152/* So code below doesn't have to care, i386 or amd64. */
153#define ORIG_EAX ORIG_RAX
154
155static const int x86_64_regmap[] =
156{
157 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
158 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
159 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
160 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
161 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
162 DS * 8, ES * 8, FS * 8, GS * 8,
163 -1, -1, -1, -1, -1, -1, -1, -1,
164 -1, -1, -1, -1, -1, -1, -1, -1,
165 -1, -1, -1, -1, -1, -1, -1, -1,
166 -1, -1, -1, -1, -1, -1, -1, -1, -1,
167 ORIG_RAX * 8
168};
169
170#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
171
172#else /* ! __x86_64__ */
173
174/* Mapping between the general-purpose registers in `struct user'
175 format and GDB's register array layout. */
176static /*const*/ int i386_regmap[] =
177{
178 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
179 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
180 EIP * 4, EFL * 4, CS * 4, SS * 4,
181 DS * 4, ES * 4, FS * 4, GS * 4
182};
183
184#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
185
186#endif
3aee8918
PA
187
188#ifdef __x86_64__
189
190/* Returns true if the current inferior belongs to a x86-64 process,
191 per the tdesc. */
192
193static int
194is_64bit_tdesc (void)
195{
196 struct regcache *regcache = get_thread_regcache (current_inferior, 0);
197
198 return register_size (regcache->tdesc, 0) == 8;
199}
200
201#endif
202
d0722149
DE
203\f
/* Called by libthread_db.  Fetch the thread-local storage base address
   for thread LWPID.  IDX selects which segment register's base is
   wanted (FS or GS on 64-bit; a GDT descriptor index on 32-bit).
   The result is stored through BASE.  Returns a ps_err_e status.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: ask the kernel directly for the FS/GS base
	 via arch_prctl, addressed by segment register.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit inferior: IDX is a GDT entry number; fetch the 4-word
       segment descriptor and extract its base (word 1).  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
244
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.
   Returns 0 on success and stores the address in *ADDR; -1 on error.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferiors keep the thread pointer in the FS base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    /* 32-bit inferiors keep the thread pointer behind %gs: the GDT
       index is the selector value shifted right by 3 (dropping the
       RPL/TI bits).  */
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the descriptor's base address word.  */
    *addr = desc[1];
    return 0;
  }
}
290
291
d0722149
DE
292\f
293static int
3aee8918 294x86_cannot_store_register (int regno)
d0722149 295{
3aee8918
PA
296#ifdef __x86_64__
297 if (is_64bit_tdesc ())
298 return 0;
299#endif
300
d0722149
DE
301 return regno >= I386_NUM_REGS;
302}
303
304static int
3aee8918 305x86_cannot_fetch_register (int regno)
d0722149 306{
3aee8918
PA
307#ifdef __x86_64__
308 if (is_64bit_tdesc ())
309 return 0;
310#endif
311
d0722149
DE
312 return regno >= I386_NUM_REGS;
313}
314
/* Copy the general-purpose registers from REGCACHE into the
   ptrace-layout buffer BUF, using the appropriate regmap for the
   inferior's word size.  Entries mapped to -1 are not present in the
   ptrace layout and are skipped.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in i386_regmap; store it explicitly at its
     32-bit `struct user' slot.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
336
/* Copy the general-purpose registers from the ptrace-layout buffer
   BUF into REGCACHE; the inverse of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax lives outside i386_regmap; supply it explicitly.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
358
/* Write REGCACHE's FP registers into BUF in the kernel's FP regset
   layout: fxsave format on 64-bit hosts, legacy fsave on 32-bit.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
368
/* Read FP registers from BUF (fxsave layout on 64-bit, fsave on
   32-bit) into REGCACHE; inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
378
379#ifndef __x86_64__
380
/* 32-bit only: write REGCACHE's FP/SSE registers to BUF in fxsave
   (PTRACE_GETFPXREGS) format.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
386
/* 32-bit only: read fxsave-format BUF into REGCACHE; inverse of
   x86_fill_fpxregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
392
393#endif
394
1570b33e
L
/* Write REGCACHE's registers to BUF in XSAVE extended-state format
   (used with PTRACE_GETREGSET/NT_X86_XSTATE).  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
400
/* Read XSAVE-format BUF into REGCACHE; inverse of
   x86_fill_xstateregset.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
406
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

/* Table of register sets this target knows how to transfer, in the
   order they are tried.  Each entry names the get/set ptrace
   requests, an optional NT_* note type, the buffer size (0 means
   sized at runtime, e.g. the XSAVE area), a category, and the
   fill/store conversion routines.  Terminated by a null entry.  */
static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* XSAVE extended state; size 0 is filled in once the kernel tells
     us how large the XSAVE area is.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
435
436static CORE_ADDR
442ea881 437x86_get_pc (struct regcache *regcache)
d0722149 438{
3aee8918 439 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
440
441 if (use_64bit)
442 {
443 unsigned long pc;
442ea881 444 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
445 return (CORE_ADDR) pc;
446 }
447 else
448 {
449 unsigned int pc;
442ea881 450 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
451 return (CORE_ADDR) pc;
452 }
453}
454
455static void
442ea881 456x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 457{
3aee8918 458 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
459
460 if (use_64bit)
461 {
462 unsigned long newpc = pc;
442ea881 463 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
464 }
465 else
466 {
467 unsigned int newpc = pc;
442ea881 468 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
469 }
470}
471\f
472static const unsigned char x86_breakpoint[] = { 0xCC };
473#define x86_breakpoint_len 1
474
475static int
476x86_breakpoint_at (CORE_ADDR pc)
477{
478 unsigned char c;
479
fc7238bb 480 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
481 if (c == 0xCC)
482 return 1;
483
484 return 0;
485}
486\f
aa5ca48f
DE
487/* Support for debug registers. */
488
/* Read debug register REGNUM of the thread PTID via
   PTRACE_PEEKUSER.  Calls error () (does not return) if the ptrace
   request fails.  errno must be cleared first because PEEKUSER's
   return value cannot distinguish failure from a -1 register value.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
505
/* Write VALUE into debug register REGNUM of the thread PTID via
   PTRACE_POKEUSER.  Calls error () (does not return) on failure.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
519
964e4306
PA
/* find_inferior callback: mark the LWP in ENTRY as needing a debug
   register refresh if it belongs to the process whose pid PID_P
   points to, stopping it momentarily if it is running.  Always
   returns 0 so the iteration visits every LWP.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
542
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  The write
   is deferred: every thread of the current process is flagged (and
   paused if needed) so the mirror is written out just before resume.
   Calls fatal () if REGNUM is not an address register (DR0-DR3).  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 556
/* Return the inferior's debug register REGNUM, read directly from
   the current thread with ptrace.  REGNUM must be an address
   register (DR0-DR3).  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  /* DR6 and DR7 are retrieved with some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}
570
/* Update the inferior's DR7 debug control register from STATE.  As
   with i386_dr_low_set_addr, the write is deferred until each thread
   is next resumed.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 581
964e4306
PA
/* Return the inferior's DR7 debug control register, read from the
   current thread.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
592
/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  (Despite the comment's wording, the value
   is returned to the caller rather than written into a state
   structure.)  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
604\f
90d74c30 605/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
606
/* Insert a break-/watchpoint of kind TYPE at ADDR of LEN bytes.
   TYPE uses the remote-protocol Z-packet digits.  Returns 0 on
   success, 1 for an unsupported type, -1 on error.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	/* Software breakpoints are planted by writing to inferior
	   memory, so bracket with prepare/done memory access.  */
	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = set_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      /* Hardware points all go through the debug-register state.  */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}
636
/* Remove a break-/watchpoint previously inserted by
   x86_insert_point; same TYPE encoding and return convention.  */

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
	int ret;

	/* Removing a software breakpoint rewrites inferior memory;
	   bracket with prepare/done memory access.  */
	ret = prepare_to_access_memory ();
	if (ret)
	  return -1;
	ret = delete_gdb_breakpoint_at (addr);
	done_accessing_memory ();
	return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
665
666static int
667x86_stopped_by_watchpoint (void)
668{
669 struct process_info *proc = current_process ();
670 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
671}
672
673static CORE_ADDR
674x86_stopped_data_address (void)
675{
676 struct process_info *proc = current_process ();
677 CORE_ADDR addr;
678 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
679 &addr))
680 return addr;
681 return 0;
682}
683\f
684/* Called when a new process is created. */
685
686static struct arch_process_info *
687x86_linux_new_process (void)
688{
689 struct arch_process_info *info = xcalloc (1, sizeof (*info));
690
691 i386_low_init_dregs (&info->debug_reg_state);
692
693 return info;
694}
695
696/* Called when a new thread is detected. */
697
698static struct arch_lwp_info *
699x86_linux_new_thread (void)
700{
701 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
702
703 info->debug_registers_changed = 1;
704
705 return info;
706}
707
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.
   Writes the mirrored DR0-DR3 values (only those with a non-zero
   reference count), then DR7, and finally clears DR6 if either a
   watchpoint was (re)written or the thread last stopped on one.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
	= &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded, otherwise, i386_low_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      /* DR7 (the control register) is written last so the address
	 registers are valid by the time it enables them.  */
      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
745\f
d0722149
DE
746/* When GDBSERVER is built as a 64-bit application on linux, the
747 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
748 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
749 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
750 conversion in-place ourselves. */
751
752/* These types below (compat_*) define a siginfo type that is layout
753 compatible with the siginfo type exported by the 32-bit userspace
754 support. */
755
756#ifdef __x86_64__
757
758typedef int compat_int_t;
759typedef unsigned int compat_uptr_t;
760
761typedef int compat_time_t;
762typedef int compat_timer_t;
763typedef int compat_clock_t;
764
765struct compat_timeval
766{
767 compat_time_t tv_sec;
768 int tv_usec;
769};
770
771typedef union compat_sigval
772{
773 compat_int_t sival_int;
774 compat_uptr_t sival_ptr;
775} compat_sigval_t;
776
777typedef struct compat_siginfo
778{
779 int si_signo;
780 int si_errno;
781 int si_code;
782
783 union
784 {
785 int _pad[((128 / sizeof (int)) - 3)];
786
787 /* kill() */
788 struct
789 {
790 unsigned int _pid;
791 unsigned int _uid;
792 } _kill;
793
794 /* POSIX.1b timers */
795 struct
796 {
797 compat_timer_t _tid;
798 int _overrun;
799 compat_sigval_t _sigval;
800 } _timer;
801
802 /* POSIX.1b signals */
803 struct
804 {
805 unsigned int _pid;
806 unsigned int _uid;
807 compat_sigval_t _sigval;
808 } _rt;
809
810 /* SIGCHLD */
811 struct
812 {
813 unsigned int _pid;
814 unsigned int _uid;
815 int _status;
816 compat_clock_t _utime;
817 compat_clock_t _stime;
818 } _sigchld;
819
820 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
821 struct
822 {
823 unsigned int _addr;
824 } _sigfault;
825
826 /* SIGPOLL */
827 struct
828 {
829 int _band;
830 int _fd;
831 } _sigpoll;
832 } _sifields;
833} compat_siginfo_t;
834
c92b5177
L
835/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
836typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
837
838typedef struct compat_x32_siginfo
839{
840 int si_signo;
841 int si_errno;
842 int si_code;
843
844 union
845 {
846 int _pad[((128 / sizeof (int)) - 3)];
847
848 /* kill() */
849 struct
850 {
851 unsigned int _pid;
852 unsigned int _uid;
853 } _kill;
854
855 /* POSIX.1b timers */
856 struct
857 {
858 compat_timer_t _tid;
859 int _overrun;
860 compat_sigval_t _sigval;
861 } _timer;
862
863 /* POSIX.1b signals */
864 struct
865 {
866 unsigned int _pid;
867 unsigned int _uid;
868 compat_sigval_t _sigval;
869 } _rt;
870
871 /* SIGCHLD */
872 struct
873 {
874 unsigned int _pid;
875 unsigned int _uid;
876 int _status;
877 compat_x32_clock_t _utime;
878 compat_x32_clock_t _stime;
879 } _sigchld;
880
881 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
882 struct
883 {
884 unsigned int _addr;
885 } _sigfault;
886
887 /* SIGPOLL */
888 struct
889 {
890 int _band;
891 int _fd;
892 } _sigpoll;
893 } _sifields;
894} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
895
d0722149
DE
896#define cpt_si_pid _sifields._kill._pid
897#define cpt_si_uid _sifields._kill._uid
898#define cpt_si_timerid _sifields._timer._tid
899#define cpt_si_overrun _sifields._timer._overrun
900#define cpt_si_status _sifields._sigchld._status
901#define cpt_si_utime _sifields._sigchld._utime
902#define cpt_si_stime _sifields._sigchld._stime
903#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
904#define cpt_si_addr _sifields._sigfault._addr
905#define cpt_si_band _sifields._sigpoll._band
906#define cpt_si_fd _sifields._sigpoll._fd
907
908/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
909 In their place is si_timer1,si_timer2. */
910#ifndef si_timerid
911#define si_timerid si_timer1
912#endif
913#ifndef si_overrun
914#define si_overrun si_timer2
915#endif
916
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  si_code (and, for positive codes, si_signo) selects
   which member of the _sifields union is live, matching the kernel's
   own compat conversion.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: signal queued from user space (sigqueue
	 and friends); sender pid/uid and the sigval are valid.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  /* Fault signals carry the faulting address.  */
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
972
/* Convert the 32-bit compat siginfo FROM into the native 64-bit
   siginfo TO; the inverse of compat_siginfo_from_siginfo.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-space queued signal.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1028
c92b5177
L
/* Convert the native siginfo FROM into the x32 compat layout TO
   (same field mapping as compat_siginfo_from_siginfo, but with the
   x32 structure whose clock_t fields are 4-byte aligned).  */

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-space queued signal.  */
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
1085
/* Convert the x32 compat siginfo FROM into the native siginfo TO;
   the inverse of compat_x32_siginfo_from_siginfo.  */

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      /* Negative si_code: user-space queued signal.  */
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
1142
d0722149
DE
1143#endif /* __x86_64__ */
1144
1145/* Convert a native/host siginfo object, into/from the siginfo in the
1146 layout of the inferiors' architecture. Returns true if any
1147 conversion was done; false otherwise. If DIRECTION is 1, then copy
1148 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1149 INF. */
1150
/* Fix up the siginfo layout between the host and the inferior.
   DIRECTION 0 copies NATIVE into INF's layout; DIRECTION 1 copies
   INF into NATIVE.  Returns 1 if a conversion was performed (32-bit
   or x32 inferior under a 64-bit gdbserver), 0 if none was needed.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (get_thread_lwp (current_inferior));
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      /* x32 inferior (64-bit tdesc, 32-bit ELF) under a 64-bit
	 gdbserver: use the x32 layout with its differently aligned
	 clock_t fields.  */
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
1191\f
1570b33e
L
/* Nonzero once GDB has told us (via the qSupported packet's
   "xmlRegisters=" feature; see x86_linux_process_qsupported) that it
   understands x86 XML target descriptions.  */
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet"; resolved in x86_linux_read_description.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not
   probed yet"; resolved in x86_linux_read_description.  */
static int have_ptrace_getregset = -1;
1228
/* Get Linux/x86 target description from running target.

   Probes the inferior (via a thread of the current inferior) for its
   word size and for FPX/XSTATE ptrace support, caches the probe
   results in have_ptrace_getfpxregs / have_ptrace_getregset, and
   returns the matching target description.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int avx;
  int tid;
  /* Static: the XCR0 value read when the GETREGSET probe first
     succeeds is reused on later calls (the probe only runs while
     have_ptrace_getregset is -1).  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (get_thread_lwp (current_inferior));

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      /* One-time probe: pre-SSE kernels fail this request.  */
      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = I386_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(I386_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	     regset buffer from XCR0; disable other non-GPR regsets.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = I386_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  avx = (have_ptrace_getregset
	 && (xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK);

  /* AVX is the highest feature we support.  */
  if (avx)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (avx)
	{
	  if (!is_elf64)
	    return tdesc_x32_avx_linux;
	  else
	    return tdesc_amd64_avx_linux;
	}
      else
	{
	  if (!is_elf64)
	    return tdesc_x32_linux;
	  else
	    return tdesc_amd64_linux;
	}
#endif
    }
  else
    {
      if (avx)
	return tdesc_i386_avx_linux;
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1351
1352/* Callback for find_inferior. Stops iteration when a thread with a
1353 given PID is found. */
1354
1355static int
1356same_process_callback (struct inferior_list_entry *entry, void *data)
1357{
1358 int pid = *(int *) data;
1359
1360 return (ptid_get_pid (entry->id) == pid);
1361}
1362
1363/* Callback for for_each_inferior. Calls the arch_setup routine for
1364 each process. */
1365
1366static void
1367x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1368{
1369 int pid = ptid_get_pid (entry->id);
1370
1371 /* Look up any thread of this processes. */
1372 current_inferior
1373 = (struct thread_info *) find_inferior (&all_threads,
1374 same_process_callback, &pid);
1375
1376 the_low_target.arch_setup ();
1377}
1378
1379/* Update all the target description of all processes; a new GDB
1380 connected, and it may or not support xml target descriptions. */
1381
1382static void
1383x86_linux_update_xmltarget (void)
1384{
1385 struct thread_info *save_inferior = current_inferior;
1386
1387 /* Before changing the register cache's internal layout, flush the
1388 contents of the current valid caches back to the threads, and
1389 release the current regcache objects. */
1390 regcache_release ();
1391
1392 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1393
1394 current_inferior = save_inferior;
1570b33e
L
1395}
1396
1397/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1398 PTRACE_GETREGSET. */
1399
1400static void
1401x86_linux_process_qsupported (const char *query)
1402{
1403 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1404 with "i386" in qSupported query, it supports x86 XML target
1405 descriptions. */
1406 use_xml = 0;
1407 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1408 {
1409 char *copy = xstrdup (query + 13);
1410 char *p;
1411
1412 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1413 {
1414 if (strcmp (p, "i386") == 0)
1415 {
1416 use_xml = 1;
1417 break;
1418 }
1419 }
1420
1421 free (copy);
1422 }
1423
1424 x86_linux_update_xmltarget ();
1425}
1426
/* Common for x86/x86-64.  */

/* Regset table shared by both register-info flavors below; regset
   sizes are adjusted at runtime by x86_linux_read_description.  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* 64-bit inferiors are accessed through regsets only (no usrregs).  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

/* PTRACE_PEEKUSER/POKEUSER register map for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1456
3aee8918
PA
1457const struct regs_info *
1458x86_linux_regs_info (void)
1459{
1460#ifdef __x86_64__
1461 if (is_64bit_tdesc ())
1462 return &amd64_linux_regs_info;
1463 else
1464#endif
1465 return &i386_linux_regs_info;
1466}
d0722149 1467
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  /* Probe the running target and cache the resulting description on
     the current process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1476
219f2f23
PA
/* Tracepoints are always supported on x86/x86-64 GNU/Linux.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1482
fa593d66
PA
/* Write the LEN bytes in BUF into the inferior at address *TO, then
   advance *TO past them so the next append continues where this one
   ended.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1489
/* Parse OP, a string of whitespace-separated hex byte values (e.g.
   "48 83 ec 18"), storing each byte into BUF.  Returns the number of
   bytes written.  Parsing stops at the first token strtoul cannot
   consume.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  int count = 0;

  for (;;)
    {
      char *tail;
      unsigned long byte = strtoul (op, &tail, 16);

      /* No progress means no more hex tokens.  */
      if (tail == op)
	break;

      buf[count++] = byte;
      op = tail;
    }

  return count;
}
1509
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   On success returns 0, updating *JUMP_ENTRY to the end of the pad
   and *ADJUSTED_INSN_ADDR/*ADJUSTED_INSN_ADDR_END to the relocated
   original instruction.  On failure returns 1 with a message in
   ERR.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1700
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   On success returns 0; on failure returns 1 with a message in ERR.
   When ORIG_SIZE is 4, a trampoline plus a 16-bit-offset jump is
   used; otherwise a plain 32-bit relative jump.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1888
/* Dispatch to the architecture-specific jump-pad builder above based
   on the inferior's target description.  See the amd64/i386 variants
   for the meaning of the arguments.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1926
1927/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1928 architectures. */
1929
1930static int
1931x86_get_min_fast_tracepoint_insn_len (void)
1932{
1933 static int warned_about_fast_tracepoints = 0;
1934
1935#ifdef __x86_64__
1936 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1937 used for fast tracepoints. */
3aee8918 1938 if (is_64bit_tdesc ())
405f8e94
SS
1939 return 5;
1940#endif
1941
58b4daa5 1942 if (agent_loaded_p ())
405f8e94
SS
1943 {
1944 char errbuf[IPA_BUFSIZ];
1945
1946 errbuf[0] = '\0';
1947
1948 /* On x86, if trampolines are available, then 4-byte jump instructions
1949 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1950 with a 4-byte offset are used instead. */
1951 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1952 return 4;
1953 else
1954 {
1955 /* GDB has no channel to explain to user why a shorter fast
1956 tracepoint is not possible, but at least make GDBserver
1957 mention that something has gone awry. */
1958 if (!warned_about_fast_tracepoints)
1959 {
1960 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1961 warned_about_fast_tracepoints = 1;
1962 }
1963 return 5;
1964 }
1965 }
1966 else
1967 {
1968 /* Indicate that the minimum length is currently unknown since the IPA
1969 has not loaded yet. */
1970 return 0;
1971 }
fa593d66
PA
1972}
1973
6a271cae
PA
1974static void
1975add_insns (unsigned char *start, int len)
1976{
1977 CORE_ADDR buildaddr = current_insn_ptr;
1978
1979 if (debug_threads)
1980 fprintf (stderr, "Adding %d bytes of insn at %s\n",
1981 len, paddress (buildaddr));
1982
1983 append_insns (&buildaddr, len, start);
1984 current_insn_ptr = buildaddr;
1985}
1986
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the bytes of the inline-asm snippet INSNS (bracketed by the
   start_NAME/end_NAME labels) to the current insertion point.  The
   leading "jmp end_NAME" keeps the host from ever executing the
   snippet in place.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* As EMIT_ASM, but assemble the snippet as 32-bit code (for 32-bit
   inferiors debugged from a 64-bit gdbserver).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2024
#ifdef __x86_64__

/* Emitters for the amd64 bytecode compiler.  Each emits machine code
   at the current insertion point; the compiled expression keeps its
   top-of-stack value in %rax and the rest on the machine stack.  */

/* Function prologue: save %rbp, reserve scratch space, stash the
   raw-register block (%rdi) and result pointer (%rsi) arguments.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Function epilogue: store %rax through the saved result pointer,
   return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply and the shift operations are not implemented; setting
   emit_error makes the caller fall back to interpreting the
   expression.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Sign-extend %rax from ARG bits (8, 16 or 32) to 64 bits.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Logical not: %rax = (%rax == 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Comparisons pop one operand and leave 1/0 in %rax.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Dereference %rax as a SIZE-byte value into %rax.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Conditional branch on top-of-stack; the 4-byte displacement of the
   emitted jne (at byte offset 10) is patched later via
   amd64_write_goto_address.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Unconditional jmp with a placeholder 4-byte displacement at byte
   offset 1, patched later.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2245
/* Patch the placeholder displacement of a previously emitted branch
   at FROM (SIZE displacement bytes) so it targets TO.  Only 4-byte
   displacements are supported; anything else sets emit_error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2261
/* Emit "movabs $NUM,%rax" to load the 64-bit constant NUM into the
   top-of-stack register.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2276
2277static void
2278amd64_emit_call (CORE_ADDR fn)
2279{
2280 unsigned char buf[16];
2281 int i;
2282 CORE_ADDR buildaddr;
4e29fb54 2283 LONGEST offset64;
6a271cae
PA
2284
2285 /* The destination function being in the shared library, may be
2286 >31-bits away off the compiled code pad. */
2287
2288 buildaddr = current_insn_ptr;
2289
2290 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2291
2292 i = 0;
2293
2294 if (offset64 > INT_MAX || offset64 < INT_MIN)
2295 {
2296 /* Offset is too large for a call. Use callq, but that requires
2297 a register, so avoid it if possible. Use r10, since it is
2298 call-clobbered, we don't have to push/pop it. */
2299 buf[i++] = 0x48; /* mov $fn,%r10 */
2300 buf[i++] = 0xba;
2301 memcpy (buf + i, &fn, 8);
2302 i += 8;
2303 buf[i++] = 0xff; /* callq *%r10 */
2304 buf[i++] = 0xd2;
2305 }
2306 else
2307 {
2308 int offset32 = offset64; /* we know we can't overflow here. */
2309 memcpy (buf + i, &offset32, 4);
2310 i += 4;
2311 }
2312
2313 append_insns (&buildaddr, i, buf);
2314 current_insn_ptr = buildaddr;
2315}
2316
/* Agent-expression op "reg": fetch raw register number REG into
   %rax.  Loads REG into %esi as the second argument (the raw_regs
   base is assumed to still be in %rdi, the first argument) and calls
   the get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2334
/* Agent-expression op "pop": discard the cached top of stack by
   reloading %rax from the in-memory stack.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}
2341
/* Flush the cached top of stack (in %rax) to the in-memory stack,
   making room for a new top-of-stack value.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2348
/* Agent-expression op "zero_ext": zero all but the low ARG bits of
   the top of stack (%rax).  Only 8/16/32 are supported; anything
   else sets emit_error.  The 32-bit case masks via %rcx because
   "and $imm32" would sign-extend the immediate to 64 bits.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
2371
/* Agent-expression op "swap": exchange the top two stack entries
   (the cached top in %rax and the entry at (%rsp)).  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2380
2381static void
2382amd64_emit_stack_adjust (int n)
2383{
2384 unsigned char buf[16];
2385 int i;
2386 CORE_ADDR buildaddr = current_insn_ptr;
2387
2388 i = 0;
2389 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2390 buf[i++] = 0x8d;
2391 buf[i++] = 0x64;
2392 buf[i++] = 0x24;
2393 /* This only handles adjustments up to 16, but we don't expect any more. */
2394 buf[i++] = n * 8;
2395 append_insns (&buildaddr, i, buf);
2396 current_insn_ptr = buildaddr;
2397}
2398
/* Emit a call to FN with the single integer argument ARG1.  FN's
   prototype is `LONGEST(*fn)(int)'; its result is left in %rax,
   which is exactly where the top of stack lives, so no move is
   needed afterwards.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
2417
/* Emit a call to FN passing ARG1 and the current top of stack.  FN's
   prototype is `void(*fn)(int,LONGEST)'.  The top of stack is saved
   around the call and restored afterwards, since the callee may
   clobber %rax.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2444
6b9801d4
SS
/* Fused compare-and-branch op "eq_goto": jump (via the later-patched
   rel32 jmp) when the two top stack entries are equal, otherwise
   fall through.  Both operands are consumed on either path.
   *OFFSET_P/*SIZE_P locate the 32-bit displacement for patching.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2464
/* Fused compare-and-branch op "ne_goto": jump when the two top stack
   entries differ; both are consumed on either path.  See
   amd64_emit_eq_goto for the patching protocol.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2484
/* Fused compare-and-branch op "lt_goto": jump when the entry at
   (%rsp) is signed-less-than the top in %rax; both are consumed.  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2504
/* Fused compare-and-branch op "le_goto": jump when the entry at
   (%rsp) is signed <= the top in %rax; both are consumed.  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2524
/* Fused compare-and-branch op "gt_goto": jump when the entry at
   (%rsp) is signed-greater-than the top in %rax; both are
   consumed.  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2544
/* Fused compare-and-branch op "ge_goto": jump when the entry at
   (%rsp) is signed >= the top in %rax; both are consumed.  (The
   .Lamd64_ge_jump label is never referenced; it is harmless.)  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2565
6a271cae
PA
/* Table of the amd64 bytecode-compilation routines defined above,
   returned by x86_emit_ops for 64-bit inferiors.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2606
2607#endif /* __x86_64__ */
2608
/* Emit the i386 bytecode prologue: set up a frame and save %ebx,
   which (together with %eax) caches the 64-bit top of stack on this
   32-bit target.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2619
/* Emit the i386 bytecode epilogue: store the 64-bit result
   (%ebx:%eax) through the value pointer at 12(%ebp), return 0, and
   tear down the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2632
/* 64-bit add on a 32-bit target: add the in-memory operand (two
   4-byte words at (%esp)) into the %ebx:%eax pair with carry
   propagation, and pop the memory operand.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2641
/* 64-bit subtract: compute (next-to-top) - (top) in place on the
   in-memory operand with borrow propagation, then pop the result
   into the %ebx:%eax top-of-stack pair.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2651
/* 64-bit multiply is not implemented for i386; flag it so the
   bytecode falls back to interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2657
/* 64-bit left shift is not implemented for i386; flag it.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2663
/* 64-bit arithmetic right shift is not implemented for i386; flag
   it.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2669
/* 64-bit logical right shift is not implemented for i386; flag
   it.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2675
/* Sign-extend the low ARG bits of the top of stack to the full
   64-bit %ebx:%eax pair.  The high word %ebx is filled by
   replicating %eax's sign bit with "sarl $31".  Unsupported widths
   set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2703
/* Logical NOT of the 64-bit top of stack: OR both halves together to
   test for zero, then set %ebx:%eax to 1 if the value was zero,
   0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2714
/* 64-bit bitwise AND of the two top stack entries; pops the
   in-memory operand.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2723
/* 64-bit bitwise OR of the two top stack entries; pops the in-memory
   operand.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2732
/* 64-bit bitwise XOR of the two top stack entries; pops the
   in-memory operand.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2741
/* 64-bit bitwise NOT of the top of stack, complementing both halves
   of the %ebx:%eax pair in place.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2749
/* 64-bit equality test: compare both halves of the in-memory operand
   against %ebx:%eax; leave 1 in %ebx:%eax if equal, 0 otherwise, and
   pop the memory operand.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2767
/* 64-bit signed "<": compare high halves first (signed), falling back
   to the low halves only on equality; result 1/0 in %ebx:%eax, memory
   operand popped.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2786
/* 64-bit unsigned "<": same structure as the signed variant but with
   unsigned "jb" on both halves.  Note the low-half comparison should
   arguably also be unsigned when the high halves are equal — it is
   ("jb"); only the high half differs from less_signed.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2805
/* Dereference the address in %eax, replacing the top of stack with
   the SIZE-byte value it points at.  The 8-byte case loads the high
   word into %ebx first so the address in %eax is still intact for
   the low-word load.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2830
/* Agent-expression op "if_goto": pop the 64-bit top of stack and
   emit a conditional jump taken when it is non-zero (both halves
   ORed together).  *OFFSET_P/*SIZE_P locate the jne's rel32
   placeholder for later patching.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2848
/* Agent-expression op "goto": unconditional jmp rel32 with a zero
   placeholder; *OFFSET_P/*SIZE_P describe the displacement field.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2860
2861static void
2862i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2863{
2864 int diff = (to - (from + size));
2865 unsigned char buf[sizeof (int)];
2866
2867 /* We're only doing 4-byte sizes at the moment. */
2868 if (size != 4)
2869 {
2870 emit_error = 1;
2871 return;
2872 }
2873
2874 memcpy (buf, &diff, sizeof (int));
2875 write_inferior_memory (from, buf, sizeof (int));
2876}
2877
2878static void
4e29fb54 2879i386_emit_const (LONGEST num)
6a271cae
PA
2880{
2881 unsigned char buf[16];
b00ad6ff 2882 int i, hi, lo;
6a271cae
PA
2883 CORE_ADDR buildaddr = current_insn_ptr;
2884
2885 i = 0;
2886 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2887 lo = num & 0xffffffff;
2888 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2889 i += 4;
2890 hi = ((num >> 32) & 0xffffffff);
2891 if (hi)
2892 {
2893 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2894 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2895 i += 4;
2896 }
2897 else
2898 {
2899 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2900 }
2901 append_insns (&buildaddr, i, buf);
2902 current_insn_ptr = buildaddr;
2903}
2904
2905static void
2906i386_emit_call (CORE_ADDR fn)
2907{
2908 unsigned char buf[16];
2909 int i, offset;
2910 CORE_ADDR buildaddr;
2911
2912 buildaddr = current_insn_ptr;
2913 i = 0;
2914 buf[i++] = 0xe8; /* call <reladdr> */
2915 offset = ((int) fn) - (buildaddr + 5);
2916 memcpy (buf + 1, &offset, 4);
2917 append_insns (&buildaddr, 5, buf);
2918 current_insn_ptr = buildaddr;
2919}
2920
/* Agent-expression op "reg": fetch raw register number REG into
   %eax.  Builds a two-argument stack frame (raw_regs base from
   8(%ebp), then REG), calls the get_raw_reg helper, and clears the
   high half %ebx of the result pair.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2946
/* Agent-expression op "pop": discard the cached top of stack by
   reloading the %ebx:%eax pair from the in-memory stack.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2954
/* Flush the cached top of stack (%ebx:%eax, high word pushed first)
   to the in-memory stack.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2962
/* Zero all but the low ARG bits of the 64-bit top of stack; the high
   word %ebx is always cleared.  Unsupported widths set
   emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2986
/* Agent-expression op "swap": exchange the cached 64-bit top of
   stack (%ebx:%eax) with the entry on the in-memory stack.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2998
2999static void
3000i386_emit_stack_adjust (int n)
3001{
3002 unsigned char buf[16];
3003 int i;
3004 CORE_ADDR buildaddr = current_insn_ptr;
3005
3006 i = 0;
3007 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3008 buf[i++] = 0x64;
3009 buf[i++] = 0x24;
3010 buf[i++] = n * 8;
3011 append_insns (&buildaddr, i, buf);
3012 current_insn_ptr = buildaddr;
3013}
3014
/* Emit a call to FN with the single integer argument ARG1, passed on
   the stack per the i386 calling convention.  FN's prototype is
   `LONGEST(*fn)(int)'; the 64-bit result comes back in %edx:%eax and
   is moved into the %ebx:%eax top-of-stack pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
3042
/* Emit a call to FN passing ARG1 and the 64-bit top of stack as
   stack arguments.  FN's prototype is `void(*fn)(int,LONGEST)'.
   The top of stack is saved in %eax around the call (only %eax needs
   preserving; the callee must not disturb the caller-saved copy we
   push).  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3078
6b9801d4
SS
3079
/* Fused 64-bit compare-and-branch "eq_goto": jump (via the
   later-patched rel32 jmp) when the two top stack entries are equal.
   Both operands are consumed on either path.  *OFFSET_P/*SIZE_P
   locate the displacement for patching (cf.
   i386_write_goto_address).  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3104
/* Fused 64-bit compare-and-branch "ne_goto": jump when the two top
   stack entries differ; both are consumed on either path.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
3130
/* Fused 64-bit signed "<" compare-and-branch: high halves compared
   signed first, low halves (unsigned) only on equality; both
   operands consumed on either path.  */

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3156
/* Fused 64-bit signed "<=" compare-and-branch; both operands
   consumed on either path.  */

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3182
/* Fused 64-bit signed ">" compare-and-branch; both operands consumed
   on either path.  */

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3208
/* Fused 64-bit signed ">=" compare-and-branch; both operands
   consumed on either path.  */

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3234
6a271cae
PA
/* Table of the i386 bytecode-compilation routines defined above,
   returned by x86_emit_ops for 32-bit inferiors.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3275
3276
3277static struct emit_ops *
3278x86_emit_ops (void)
3279{
3280#ifdef __x86_64__
3aee8918 3281 if (is_64bit_tdesc ())
6a271cae
PA
3282 return &amd64_emit_ops;
3283 else
3284#endif
3285 return &i386_emit_ops;
3286}
3287
c2d6af84
PA
/* Report that this target supports the vCont range-stepping
   extension.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3293
d0722149
DE
/* The x86 linux_target_ops vector consumed by linux-low.c.

   This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3333
/* One-time architecture setup: register every x86 target description
   this build can serve, synthesize the "no XML" fallback
   descriptions for old GDBs, and initialize the regset table.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();

  /* Fallback description, advertised via a hand-written xmltarget
     string rather than a generated XML description.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();

  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.646078 seconds and 4 git commands to generate.