Move low-level Linux x86 debug register code to a shared file
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
32d0add0 3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265
GB
26#include "x86-low.h"
27#include "x86-xstate.h"
d0722149
DE
28
29#include "gdb_proc_service.h"
b5737fa9
PA
30/* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32#ifndef ELFMAG0
33#include "elf/common.h"
34#endif
35
58b4daa5 36#include "agent.h"
3aee8918 37#include "tdesc.h"
c144c7a0 38#include "tracepoint.h"
f699aaba 39#include "ax.h"
7b669087 40#include "nat/linux-nat.h"
4b134ca1 41#include "nat/x86-linux.h"
8e5d4070 42#include "nat/x86-linux-dregs.h"
d0722149 43
3aee8918 44#ifdef __x86_64__
90884b2b
L
45/* Defined in auto-generated file amd64-linux.c. */
46void init_registers_amd64_linux (void);
3aee8918
PA
47extern const struct target_desc *tdesc_amd64_linux;
48
1570b33e
L
49/* Defined in auto-generated file amd64-avx-linux.c. */
50void init_registers_amd64_avx_linux (void);
3aee8918
PA
51extern const struct target_desc *tdesc_amd64_avx_linux;
52
01f9f808
MS
53/* Defined in auto-generated file amd64-avx512-linux.c. */
54void init_registers_amd64_avx512_linux (void);
55extern const struct target_desc *tdesc_amd64_avx512_linux;
56
a196ebeb
WT
57/* Defined in auto-generated file amd64-mpx-linux.c. */
58void init_registers_amd64_mpx_linux (void);
59extern const struct target_desc *tdesc_amd64_mpx_linux;
60
4d47af5c
L
61/* Defined in auto-generated file x32-linux.c. */
62void init_registers_x32_linux (void);
3aee8918
PA
63extern const struct target_desc *tdesc_x32_linux;
64
4d47af5c
L
65/* Defined in auto-generated file x32-avx-linux.c. */
66void init_registers_x32_avx_linux (void);
3aee8918 67extern const struct target_desc *tdesc_x32_avx_linux;
a196ebeb 68
01f9f808
MS
69/* Defined in auto-generated file x32-avx512-linux.c. */
70void init_registers_x32_avx512_linux (void);
71extern const struct target_desc *tdesc_x32_avx512_linux;
72
3aee8918
PA
73#endif
74
75/* Defined in auto-generated file i386-linux.c. */
76void init_registers_i386_linux (void);
77extern const struct target_desc *tdesc_i386_linux;
78
79/* Defined in auto-generated file i386-mmx-linux.c. */
80void init_registers_i386_mmx_linux (void);
81extern const struct target_desc *tdesc_i386_mmx_linux;
82
83/* Defined in auto-generated file i386-avx-linux.c. */
84void init_registers_i386_avx_linux (void);
85extern const struct target_desc *tdesc_i386_avx_linux;
86
01f9f808
MS
87/* Defined in auto-generated file i386-avx512-linux.c. */
88void init_registers_i386_avx512_linux (void);
89extern const struct target_desc *tdesc_i386_avx512_linux;
90
a196ebeb
WT
91/* Defined in auto-generated file i386-mpx-linux.c. */
92void init_registers_i386_mpx_linux (void);
93extern const struct target_desc *tdesc_i386_mpx_linux;
94
3aee8918
PA
#ifdef __x86_64__
/* Fallback target description used when GDB has no XML support.  */
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;
99
1570b33e 100
/* Jump instruction templates (presumably patched with a displacement
   when installed elsewhere in this file — the bytes after the opcode
   are filled in at insertion time): a 32-bit-displacement `jmp' and a
   16-bit-displacement `jmp' with an operand-size prefix.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
117
118#include <sys/reg.h>
119#include <sys/procfs.h>
120#include <sys/ptrace.h>
1570b33e
L
121#include <sys/uio.h>
122
/* Fallback definitions for ptrace requests that older system headers
   may not provide.  Values match the kernel UAPI.  */

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
149
aa5ca48f
DE
150/* Per-process arch-specific data we want to keep. */
151
152struct arch_process_info
153{
df7e5265 154 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
155};
156
d0722149
DE
157#ifdef __x86_64__
158
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
d0722149
DE
175
/* Mapping from GDB's amd64 register numbers to `struct user' offsets;
   -1 marks registers not transferred via the GP regset (FP/SSE/AVX
   registers travel in other regsets).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,			/* MPX registers BND0 ... BND3.  */
  -1, -1,				/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
d0722149
DE
205
206#else /* ! __x86_64__ */
207
208/* Mapping between the general-purpose registers in `struct user'
209 format and GDB's register array layout. */
210static /*const*/ int i386_regmap[] =
211{
212 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
213 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
214 EIP * 4, EFL * 4, CS * 4, SS * 4,
215 DS * 4, ES * 4, FS * 4, GS * 4
216};
217
218#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
219
bc9540e8
PA
220#define REGSIZE 4
221
d0722149 222#endif
3aee8918
PA
223
224#ifdef __x86_64__
225
226/* Returns true if the current inferior belongs to a x86-64 process,
227 per the tdesc. */
228
229static int
230is_64bit_tdesc (void)
231{
0bfdf32f 232 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
233
234 return register_size (regcache->tdesc, 0) == 8;
235}
236
237#endif
238
d0722149
DE
239\f
240/* Called by libthread_db. */
241
242ps_err_e
243ps_get_thread_area (const struct ps_prochandle *ph,
244 lwpid_t lwpid, int idx, void **base)
245{
246#ifdef __x86_64__
3aee8918 247 int use_64bit = is_64bit_tdesc ();
d0722149
DE
248
249 if (use_64bit)
250 {
251 switch (idx)
252 {
253 case FS:
254 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
255 return PS_OK;
256 break;
257 case GS:
258 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
259 return PS_OK;
260 break;
261 default:
262 return PS_BADADDR;
263 }
264 return PS_ERR;
265 }
266#endif
267
268 {
269 unsigned int desc[4];
270
271 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
272 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
273 return PS_ERR;
274
d1ec4ce7
DE
275 /* Ensure we properly extend the value to 64-bits for x86_64. */
276 *base = (void *) (uintptr_t) desc[1];
d0722149
DE
277 return PS_OK;
278 }
279}
fa593d66
PA
280
281/* Get the thread area address. This is used to recognize which
282 thread is which when tracing with the in-process agent library. We
283 don't read anything from the address, and treat it as opaque; it's
284 the address itself that we assume is unique per-thread. */
285
286static int
287x86_get_thread_area (int lwpid, CORE_ADDR *addr)
288{
289#ifdef __x86_64__
3aee8918 290 int use_64bit = is_64bit_tdesc ();
fa593d66
PA
291
292 if (use_64bit)
293 {
294 void *base;
295 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
296 {
297 *addr = (CORE_ADDR) (uintptr_t) base;
298 return 0;
299 }
300
301 return -1;
302 }
303#endif
304
305 {
306 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
d86d4aaf
DE
307 struct thread_info *thr = get_lwp_thread (lwp);
308 struct regcache *regcache = get_thread_regcache (thr, 1);
fa593d66
PA
309 unsigned int desc[4];
310 ULONGEST gs = 0;
311 const int reg_thread_area = 3; /* bits to scale down register value. */
312 int idx;
313
314 collect_register_by_name (regcache, "gs", &gs);
315
316 idx = gs >> reg_thread_area;
317
318 if (ptrace (PTRACE_GET_THREAD_AREA,
d86d4aaf 319 lwpid_of (thr),
493e2a69 320 (void *) (long) idx, (unsigned long) &desc) < 0)
fa593d66
PA
321 return -1;
322
323 *addr = desc[1];
324 return 0;
325 }
326}
327
328
d0722149
DE
329\f
330static int
3aee8918 331x86_cannot_store_register (int regno)
d0722149 332{
3aee8918
PA
333#ifdef __x86_64__
334 if (is_64bit_tdesc ())
335 return 0;
336#endif
337
d0722149
DE
338 return regno >= I386_NUM_REGS;
339}
340
341static int
3aee8918 342x86_cannot_fetch_register (int regno)
d0722149 343{
3aee8918
PA
344#ifdef __x86_64__
345 if (is_64bit_tdesc ())
346 return 0;
347#endif
348
d0722149
DE
349 return regno >= I386_NUM_REGS;
350}
351
352static void
442ea881 353x86_fill_gregset (struct regcache *regcache, void *buf)
d0722149
DE
354{
355 int i;
356
357#ifdef __x86_64__
3aee8918 358 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
359 {
360 for (i = 0; i < X86_64_NUM_REGS; i++)
361 if (x86_64_regmap[i] != -1)
442ea881 362 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
363 return;
364 }
9e0aa64f
JK
365
366 /* 32-bit inferior registers need to be zero-extended.
367 Callers would read uninitialized memory otherwise. */
368 memset (buf, 0x00, X86_64_USER_REGS * 8);
d0722149
DE
369#endif
370
371 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 372 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 373
442ea881 374 collect_register_by_name (regcache, "orig_eax",
bc9540e8 375 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
376}
377
378static void
442ea881 379x86_store_gregset (struct regcache *regcache, const void *buf)
d0722149
DE
380{
381 int i;
382
383#ifdef __x86_64__
3aee8918 384 if (register_size (regcache->tdesc, 0) == 8)
d0722149
DE
385 {
386 for (i = 0; i < X86_64_NUM_REGS; i++)
387 if (x86_64_regmap[i] != -1)
442ea881 388 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
d0722149
DE
389 return;
390 }
391#endif
392
393 for (i = 0; i < I386_NUM_REGS; i++)
442ea881 394 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
d0722149 395
442ea881 396 supply_register_by_name (regcache, "orig_eax",
bc9540e8 397 ((char *) buf) + ORIG_EAX * REGSIZE);
d0722149
DE
398}
399
/* Collect the FP registers from REGCACHE into BUF, in the layout the
   kernel expects for this word size.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifndef __x86_64__
  i387_cache_to_fsave (regcache, buf);
#else
  i387_cache_to_fxsave (regcache, buf);
#endif
}
409
/* Supply the FP registers from BUF back into REGCACHE, in the layout
   the kernel uses for this word size.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifndef __x86_64__
  i387_fsave_to_cache (regcache, buf);
#else
  i387_fxsave_to_cache (regcache, buf);
#endif
}
419
420#ifndef __x86_64__
421
/* Collect the extended FP/SSE registers (FXSAVE layout) from
   REGCACHE into BUF.  i386 only.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
427
/* Supply the extended FP/SSE registers (FXSAVE layout) from BUF back
   into REGCACHE.  i386 only.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
433
434#endif
435
1570b33e
L
/* Collect the XSAVE extended state from REGCACHE into BUF.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
441
/* Supply the XSAVE extended state from BUF back into REGCACHE.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
447
d0722149
DE
448/* ??? The non-biarch i386 case stores all the i387 regs twice.
449 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
450 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
451 doesn't work. IWBN to avoid the duplication in the case where it
452 does work. Maybe the arch_setup routine could check whether it works
3aee8918 453 and update the supported regsets accordingly. */
d0722149 454
3aee8918 455static struct regset_info x86_regsets[] =
d0722149
DE
456{
457#ifdef HAVE_PTRACE_GETREGS
1570b33e 458 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
459 GENERAL_REGS,
460 x86_fill_gregset, x86_store_gregset },
1570b33e
L
461 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
462 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
463# ifndef __x86_64__
464# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 465 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
466 EXTENDED_REGS,
467 x86_fill_fpxregset, x86_store_fpxregset },
468# endif
469# endif
1570b33e 470 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
471 FP_REGS,
472 x86_fill_fpregset, x86_store_fpregset },
473#endif /* HAVE_PTRACE_GETREGS */
1570b33e 474 { 0, 0, 0, -1, -1, NULL, NULL }
d0722149
DE
475};
476
477static CORE_ADDR
442ea881 478x86_get_pc (struct regcache *regcache)
d0722149 479{
3aee8918 480 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
481
482 if (use_64bit)
483 {
484 unsigned long pc;
442ea881 485 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
486 return (CORE_ADDR) pc;
487 }
488 else
489 {
490 unsigned int pc;
442ea881 491 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
492 return (CORE_ADDR) pc;
493 }
494}
495
496static void
442ea881 497x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149 498{
3aee8918 499 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
500
501 if (use_64bit)
502 {
503 unsigned long newpc = pc;
442ea881 504 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
505 }
506 else
507 {
508 unsigned int newpc = pc;
442ea881 509 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
510 }
511}
512\f
513static const unsigned char x86_breakpoint[] = { 0xCC };
514#define x86_breakpoint_len 1
515
516static int
517x86_breakpoint_at (CORE_ADDR pc)
518{
519 unsigned char c;
520
fc7238bb 521 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
522 if (c == 0xCC)
523 return 1;
524
525 return 0;
526}
527\f
42995dbd 528/* Low-level function vector. */
df7e5265 529struct x86_dr_low_type x86_dr_low =
42995dbd 530 {
d33472ad
GB
531 x86_linux_dr_set_control,
532 x86_linux_dr_set_addr,
533 x86_linux_dr_get_addr,
534 x86_linux_dr_get_status,
535 x86_linux_dr_get_control,
42995dbd
GB
536 sizeof (void *),
537 };
aa5ca48f 538\f
90d74c30 539/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
540
541static int
802e8e6d
PA
542x86_supports_z_point_type (char z_type)
543{
544 switch (z_type)
545 {
546 case Z_PACKET_SW_BP:
547 case Z_PACKET_HW_BP:
548 case Z_PACKET_WRITE_WP:
549 case Z_PACKET_ACCESS_WP:
550 return 1;
551 default:
552 return 0;
553 }
554}
555
556static int
557x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
558 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
559{
560 struct process_info *proc = current_process ();
802e8e6d 561
aa5ca48f
DE
562 switch (type)
563 {
802e8e6d
PA
564 case raw_bkpt_type_sw:
565 return insert_memory_breakpoint (bp);
566
567 case raw_bkpt_type_hw:
568 case raw_bkpt_type_write_wp:
569 case raw_bkpt_type_access_wp:
a4165e94 570 {
802e8e6d
PA
571 enum target_hw_bp_type hw_type
572 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 573 struct x86_debug_reg_state *state
fe978cb0 574 = &proc->priv->arch_private->debug_reg_state;
a4165e94 575
df7e5265 576 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 577 }
961bd387 578
aa5ca48f
DE
579 default:
580 /* Unsupported. */
581 return 1;
582 }
583}
584
585static int
802e8e6d
PA
586x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
587 int size, struct raw_breakpoint *bp)
aa5ca48f
DE
588{
589 struct process_info *proc = current_process ();
802e8e6d 590
aa5ca48f
DE
591 switch (type)
592 {
802e8e6d
PA
593 case raw_bkpt_type_sw:
594 return remove_memory_breakpoint (bp);
595
596 case raw_bkpt_type_hw:
597 case raw_bkpt_type_write_wp:
598 case raw_bkpt_type_access_wp:
a4165e94 599 {
802e8e6d
PA
600 enum target_hw_bp_type hw_type
601 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 602 struct x86_debug_reg_state *state
fe978cb0 603 = &proc->priv->arch_private->debug_reg_state;
a4165e94 604
df7e5265 605 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 606 }
aa5ca48f
DE
607 default:
608 /* Unsupported. */
609 return 1;
610 }
611}
612
613static int
614x86_stopped_by_watchpoint (void)
615{
616 struct process_info *proc = current_process ();
fe978cb0 617 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
618}
619
620static CORE_ADDR
621x86_stopped_data_address (void)
622{
623 struct process_info *proc = current_process ();
624 CORE_ADDR addr;
fe978cb0 625 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 626 &addr))
aa5ca48f
DE
627 return addr;
628 return 0;
629}
630\f
631/* Called when a new process is created. */
632
633static struct arch_process_info *
634x86_linux_new_process (void)
635{
ed859da7 636 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 637
df7e5265 638 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
639
640 return info;
641}
642
/* Called when a new thread is detected.  Flag its debug registers as
   stale so they get rewritten before the thread next resumes.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  lwp_set_debug_registers_changed (lwp, 1);
}
650
70a0bb6b
GB
651/* See nat/x86-dregs.h. */
652
653struct x86_debug_reg_state *
654x86_debug_reg_state (pid_t pid)
655{
656 struct process_info *proc = find_process_pid (pid);
657
658 return &proc->priv->arch_private->debug_reg_state;
659}
660
2b95d440
GB
/* Called prior to resuming a thread: push any pending debug-register
   changes down to the hardware.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  x86_linux_update_debug_registers (lwp);
}
aa5ca48f 668\f
d0722149
DE
669/* When GDBSERVER is built as a 64-bit application on linux, the
670 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
671 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
672 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
673 conversion in-place ourselves. */
674
675/* These types below (compat_*) define a siginfo type that is layout
676 compatible with the siginfo type exported by the 32-bit userspace
677 support. */
678
679#ifdef __x86_64__
680
/* Layout-compatible mirror of the 32-bit userspace siginfo; field
   order and sizes must match the kernel compat ABI exactly, so only
   the comments differ from the usual rendering.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
757
c92b5177
L
758/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
759typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
760
761typedef struct compat_x32_siginfo
762{
763 int si_signo;
764 int si_errno;
765 int si_code;
766
767 union
768 {
769 int _pad[((128 / sizeof (int)) - 3)];
770
771 /* kill() */
772 struct
773 {
774 unsigned int _pid;
775 unsigned int _uid;
776 } _kill;
777
778 /* POSIX.1b timers */
779 struct
780 {
781 compat_timer_t _tid;
782 int _overrun;
783 compat_sigval_t _sigval;
784 } _timer;
785
786 /* POSIX.1b signals */
787 struct
788 {
789 unsigned int _pid;
790 unsigned int _uid;
791 compat_sigval_t _sigval;
792 } _rt;
793
794 /* SIGCHLD */
795 struct
796 {
797 unsigned int _pid;
798 unsigned int _uid;
799 int _status;
800 compat_x32_clock_t _utime;
801 compat_x32_clock_t _stime;
802 } _sigchld;
803
804 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
805 struct
806 {
807 unsigned int _addr;
808 } _sigfault;
809
810 /* SIGPOLL */
811 struct
812 {
813 int _band;
814 int _fd;
815 } _sigpoll;
816 } _sifields;
817} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
818
d0722149
DE
/* Shorthand accessors into the compat siginfo unions, mirroring the
   kernel's own naming.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
839
840static void
841compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
842{
843 memset (to, 0, sizeof (*to));
844
845 to->si_signo = from->si_signo;
846 to->si_errno = from->si_errno;
847 to->si_code = from->si_code;
848
b53a1623 849 if (to->si_code == SI_TIMER)
d0722149 850 {
b53a1623
PA
851 to->cpt_si_timerid = from->si_timerid;
852 to->cpt_si_overrun = from->si_overrun;
d0722149
DE
853 to->cpt_si_ptr = (intptr_t) from->si_ptr;
854 }
855 else if (to->si_code == SI_USER)
856 {
857 to->cpt_si_pid = from->si_pid;
858 to->cpt_si_uid = from->si_uid;
859 }
b53a1623 860 else if (to->si_code < 0)
d0722149 861 {
b53a1623
PA
862 to->cpt_si_pid = from->si_pid;
863 to->cpt_si_uid = from->si_uid;
d0722149
DE
864 to->cpt_si_ptr = (intptr_t) from->si_ptr;
865 }
866 else
867 {
868 switch (to->si_signo)
869 {
870 case SIGCHLD:
871 to->cpt_si_pid = from->si_pid;
872 to->cpt_si_uid = from->si_uid;
873 to->cpt_si_status = from->si_status;
874 to->cpt_si_utime = from->si_utime;
875 to->cpt_si_stime = from->si_stime;
876 break;
877 case SIGILL:
878 case SIGFPE:
879 case SIGSEGV:
880 case SIGBUS:
881 to->cpt_si_addr = (intptr_t) from->si_addr;
882 break;
883 case SIGPOLL:
884 to->cpt_si_band = from->si_band;
885 to->cpt_si_fd = from->si_fd;
886 break;
887 default:
888 to->cpt_si_pid = from->si_pid;
889 to->cpt_si_uid = from->si_uid;
890 to->cpt_si_ptr = (intptr_t) from->si_ptr;
891 break;
892 }
893 }
894}
895
896static void
897siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
898{
899 memset (to, 0, sizeof (*to));
900
901 to->si_signo = from->si_signo;
902 to->si_errno = from->si_errno;
903 to->si_code = from->si_code;
904
b53a1623 905 if (to->si_code == SI_TIMER)
d0722149 906 {
b53a1623
PA
907 to->si_timerid = from->cpt_si_timerid;
908 to->si_overrun = from->cpt_si_overrun;
d0722149
DE
909 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
910 }
911 else if (to->si_code == SI_USER)
912 {
913 to->si_pid = from->cpt_si_pid;
914 to->si_uid = from->cpt_si_uid;
915 }
b53a1623 916 else if (to->si_code < 0)
d0722149 917 {
b53a1623
PA
918 to->si_pid = from->cpt_si_pid;
919 to->si_uid = from->cpt_si_uid;
d0722149
DE
920 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
921 }
922 else
923 {
924 switch (to->si_signo)
925 {
926 case SIGCHLD:
927 to->si_pid = from->cpt_si_pid;
928 to->si_uid = from->cpt_si_uid;
929 to->si_status = from->cpt_si_status;
930 to->si_utime = from->cpt_si_utime;
931 to->si_stime = from->cpt_si_stime;
932 break;
933 case SIGILL:
934 case SIGFPE:
935 case SIGSEGV:
936 case SIGBUS:
937 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
938 break;
939 case SIGPOLL:
940 to->si_band = from->cpt_si_band;
941 to->si_fd = from->cpt_si_fd;
942 break;
943 default:
944 to->si_pid = from->cpt_si_pid;
945 to->si_uid = from->cpt_si_uid;
946 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
947 break;
948 }
949 }
950}
951
c92b5177
L
952static void
953compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
954 siginfo_t *from)
955{
956 memset (to, 0, sizeof (*to));
957
958 to->si_signo = from->si_signo;
959 to->si_errno = from->si_errno;
960 to->si_code = from->si_code;
961
962 if (to->si_code == SI_TIMER)
963 {
964 to->cpt_si_timerid = from->si_timerid;
965 to->cpt_si_overrun = from->si_overrun;
966 to->cpt_si_ptr = (intptr_t) from->si_ptr;
967 }
968 else if (to->si_code == SI_USER)
969 {
970 to->cpt_si_pid = from->si_pid;
971 to->cpt_si_uid = from->si_uid;
972 }
973 else if (to->si_code < 0)
974 {
975 to->cpt_si_pid = from->si_pid;
976 to->cpt_si_uid = from->si_uid;
977 to->cpt_si_ptr = (intptr_t) from->si_ptr;
978 }
979 else
980 {
981 switch (to->si_signo)
982 {
983 case SIGCHLD:
984 to->cpt_si_pid = from->si_pid;
985 to->cpt_si_uid = from->si_uid;
986 to->cpt_si_status = from->si_status;
987 to->cpt_si_utime = from->si_utime;
988 to->cpt_si_stime = from->si_stime;
989 break;
990 case SIGILL:
991 case SIGFPE:
992 case SIGSEGV:
993 case SIGBUS:
994 to->cpt_si_addr = (intptr_t) from->si_addr;
995 break;
996 case SIGPOLL:
997 to->cpt_si_band = from->si_band;
998 to->cpt_si_fd = from->si_fd;
999 break;
1000 default:
1001 to->cpt_si_pid = from->si_pid;
1002 to->cpt_si_uid = from->si_uid;
1003 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1004 break;
1005 }
1006 }
1007}
1008
1009static void
1010siginfo_from_compat_x32_siginfo (siginfo_t *to,
1011 compat_x32_siginfo_t *from)
1012{
1013 memset (to, 0, sizeof (*to));
1014
1015 to->si_signo = from->si_signo;
1016 to->si_errno = from->si_errno;
1017 to->si_code = from->si_code;
1018
1019 if (to->si_code == SI_TIMER)
1020 {
1021 to->si_timerid = from->cpt_si_timerid;
1022 to->si_overrun = from->cpt_si_overrun;
1023 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1024 }
1025 else if (to->si_code == SI_USER)
1026 {
1027 to->si_pid = from->cpt_si_pid;
1028 to->si_uid = from->cpt_si_uid;
1029 }
1030 else if (to->si_code < 0)
1031 {
1032 to->si_pid = from->cpt_si_pid;
1033 to->si_uid = from->cpt_si_uid;
1034 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1035 }
1036 else
1037 {
1038 switch (to->si_signo)
1039 {
1040 case SIGCHLD:
1041 to->si_pid = from->cpt_si_pid;
1042 to->si_uid = from->cpt_si_uid;
1043 to->si_status = from->cpt_si_status;
1044 to->si_utime = from->cpt_si_utime;
1045 to->si_stime = from->cpt_si_stime;
1046 break;
1047 case SIGILL:
1048 case SIGFPE:
1049 case SIGSEGV:
1050 case SIGBUS:
1051 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1052 break;
1053 case SIGPOLL:
1054 to->si_band = from->cpt_si_band;
1055 to->si_fd = from->cpt_si_fd;
1056 break;
1057 default:
1058 to->si_pid = from->cpt_si_pid;
1059 to->si_uid = from->cpt_si_uid;
1060 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1061 break;
1062 }
1063 }
1064}
1065
d0722149
DE
1066#endif /* __x86_64__ */
1067
1068/* Convert a native/host siginfo object, into/from the siginfo in the
1069 layout of the inferiors' architecture. Returns true if any
1070 conversion was done; false otherwise. If DIRECTION is 1, then copy
1071 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1072 INF. */
1073
1074static int
a5362b9a 1075x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
d0722149
DE
1076{
1077#ifdef __x86_64__
760256f9 1078 unsigned int machine;
0bfdf32f 1079 int tid = lwpid_of (current_thread);
760256f9
PA
1080 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1081
d0722149 1082 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 1083 if (!is_64bit_tdesc ())
d0722149 1084 {
38e08fca 1085 gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));
d0722149
DE
1086
1087 if (direction == 0)
1088 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1089 else
1090 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1091
c92b5177
L
1092 return 1;
1093 }
1094 /* No fixup for native x32 GDB. */
760256f9 1095 else if (!is_elf64 && sizeof (void *) == 8)
c92b5177 1096 {
38e08fca 1097 gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));
c92b5177
L
1098
1099 if (direction == 0)
1100 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1101 native);
1102 else
1103 siginfo_from_compat_x32_siginfo (native,
1104 (struct compat_x32_siginfo *) inf);
1105
d0722149
DE
1106 return 1;
1107 }
1108#endif
1109
1110 return 0;
1111}
1112\f
1570b33e
L
/* Nonzero once GDB has announced XML target-description support.  */
static int use_xml;

/* Format of XSAVE extended state is:
 	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;
1149
/* Get Linux/x86 target description from running target.  Probes the
   ptrace register-set interfaces once (caching the results in
   have_ptrace_getfpxregs / have_ptrace_getregset / xcr0) and returns
   the richest target description supported by both the processor
   (per XCR0) and the connected GDB (per use_xml).  Errors out if the
   inferior's word size cannot be debugged by this GDBserver.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* Static: XCR0 is read from the inferior only on the first call
     (while have_ptrace_getregset is still -1) and reused after.  */
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      /* A 32-bit GDBserver cannot debug a 64-bit inferior.  */
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe once whether PTRACE_GETFPXREGS works; even when the header
     defines it, the kernel fails it on pre-SSE processors.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* Probe once whether PTRACE_GETREGSET works, and if so read XCR0
     out of the XSAVE extended state area.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSAVE
	     regset buffer to what XCR0 enables, and disable (size 0)
	     the non-general regsets it supersedes.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  /* Native 64-bit inferior: pick the description matching the
	     enabled XCR0 feature combination.  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  /* x32 inferior (64-bit machine, 32-bit ELF).  */
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      /* 32-bit (i386) inferior.  */
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
1314
1315/* Callback for find_inferior. Stops iteration when a thread with a
1316 given PID is found. */
1317
1318static int
1319same_process_callback (struct inferior_list_entry *entry, void *data)
1320{
1321 int pid = *(int *) data;
1322
1323 return (ptid_get_pid (entry->id) == pid);
1324}
1325
1326/* Callback for for_each_inferior. Calls the arch_setup routine for
1327 each process. */
1328
1329static void
1330x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1331{
1332 int pid = ptid_get_pid (entry->id);
1333
1334 /* Look up any thread of this processes. */
0bfdf32f 1335 current_thread
3aee8918
PA
1336 = (struct thread_info *) find_inferior (&all_threads,
1337 same_process_callback, &pid);
1338
1339 the_low_target.arch_setup ();
1340}
1341
1342/* Update all the target description of all processes; a new GDB
1343 connected, and it may or not support xml target descriptions. */
1344
1345static void
1346x86_linux_update_xmltarget (void)
1347{
0bfdf32f 1348 struct thread_info *saved_thread = current_thread;
3aee8918
PA
1349
1350 /* Before changing the register cache's internal layout, flush the
1351 contents of the current valid caches back to the threads, and
1352 release the current regcache objects. */
1353 regcache_release ();
1354
1355 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1356
0bfdf32f 1357 current_thread = saved_thread;
1570b33e
L
1358}
1359
1360/* Process qSupported query, "xmlRegisters=". Update the buffer size for
1361 PTRACE_GETREGSET. */
1362
1363static void
1364x86_linux_process_qsupported (const char *query)
1365{
1366 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1367 with "i386" in qSupported query, it supports x86 XML target
1368 descriptions. */
1369 use_xml = 0;
61012eef 1370 if (query != NULL && startswith (query, "xmlRegisters="))
1570b33e
L
1371 {
1372 char *copy = xstrdup (query + 13);
1373 char *p;
1374
1375 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1376 {
1377 if (strcmp (p, "i386") == 0)
1378 {
1379 use_xml = 1;
1380 break;
1381 }
1382 }
1383
1384 free (copy);
1385 }
1386
1387 x86_linux_update_xmltarget ();
1388}
1389
3aee8918 1390/* Common for x86/x86-64. */
d0722149 1391
3aee8918
PA
1392static struct regsets_info x86_regsets_info =
1393 {
1394 x86_regsets, /* regsets */
1395 0, /* num_regsets */
1396 NULL, /* disabled_regsets */
1397 };
214d508e
L
1398
1399#ifdef __x86_64__
3aee8918
PA
1400static struct regs_info amd64_linux_regs_info =
1401 {
1402 NULL, /* regset_bitmap */
1403 NULL, /* usrregs_info */
1404 &x86_regsets_info
1405 };
d0722149 1406#endif
3aee8918
PA
1407static struct usrregs_info i386_linux_usrregs_info =
1408 {
1409 I386_NUM_REGS,
1410 i386_regmap,
1411 };
d0722149 1412
3aee8918
PA
1413static struct regs_info i386_linux_regs_info =
1414 {
1415 NULL, /* regset_bitmap */
1416 &i386_linux_usrregs_info,
1417 &x86_regsets_info
1418 };
d0722149 1419
3aee8918
PA
1420const struct regs_info *
1421x86_linux_regs_info (void)
1422{
1423#ifdef __x86_64__
1424 if (is_64bit_tdesc ())
1425 return &amd64_linux_regs_info;
1426 else
1427#endif
1428 return &i386_linux_regs_info;
1429}
d0722149 1430
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  /* Re-read the tdesc from the running inferior; see
     x86_linux_read_description.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1439
219f2f23
PA
/* The x86 target unconditionally supports tracepoints.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1445
fa593d66
PA
/* Write the LEN bytes at BUF into the inferior at address *TO, then
   advance *TO past what was written.  Used while emitting jump-pad
   and compiled-expression code.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1452
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 89 04 24", into BUF.  Returns the number of bytes stored.
   Decoding stops at the first token strtoul cannot parse.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1472
1473#ifdef __x86_64__
1474
1475/* Build a jump pad that saves registers and calls a collection
1476 function. Writes a jump instruction to the jump pad to
1477 JJUMPAD_INSN. The caller is responsible to write it in at the
1478 tracepoint address. */
1479
static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					ULONGEST *jjump_pad_insn_size_unused_placeholder_see_below,
					char *err)
{
  /* NOTE(review): parameter list kept exactly as original; see below.  */
  return 0;
}
1661
1662#endif /* __x86_64__ */
1663
1664/* Build a jump pad that saves registers and calls a collection
1665 function. Writes a jump instruction to the jump pad to
1666 JJUMPAD_INSN. The caller is responsible to write it in at the
1667 tracepoint address. */
1668
static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  /* Returns 0 on success; on failure returns 1 with an "E." error
     string in ERR.  jump_insn/small_jump_insn are file-level byte
     templates defined earlier in this file (outside this excerpt).  */
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state (mirror of the save sequence above).  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1851
/* Install a fast-tracepoint jump pad, dispatching to the amd64 or
   i386 implementation according to the inferior's tdesc word size.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1889
1890/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1891 architectures. */
1892
1893static int
1894x86_get_min_fast_tracepoint_insn_len (void)
1895{
1896 static int warned_about_fast_tracepoints = 0;
1897
1898#ifdef __x86_64__
1899 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1900 used for fast tracepoints. */
3aee8918 1901 if (is_64bit_tdesc ())
405f8e94
SS
1902 return 5;
1903#endif
1904
58b4daa5 1905 if (agent_loaded_p ())
405f8e94
SS
1906 {
1907 char errbuf[IPA_BUFSIZ];
1908
1909 errbuf[0] = '\0';
1910
1911 /* On x86, if trampolines are available, then 4-byte jump instructions
1912 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1913 with a 4-byte offset are used instead. */
1914 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1915 return 4;
1916 else
1917 {
1918 /* GDB has no channel to explain to user why a shorter fast
1919 tracepoint is not possible, but at least make GDBserver
1920 mention that something has gone awry. */
1921 if (!warned_about_fast_tracepoints)
1922 {
1923 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1924 warned_about_fast_tracepoints = 1;
1925 }
1926 return 5;
1927 }
1928 }
1929 else
1930 {
1931 /* Indicate that the minimum length is currently unknown since the IPA
1932 has not loaded yet. */
1933 return 0;
1934 }
fa593d66
PA
1935}
1936
6a271cae
PA
/* Append the LEN bytes at START to the compiled-expression code being
   built at current_insn_ptr, advancing current_insn_ptr past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1949
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the machine code between the start_NAME/end_NAME labels of the
   embedded asm into the inferior at current_insn_ptr.  The jmp keeps
   the embedded template out of this function's own execution path.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble the template as 32-bit code
   (.code32/.code64) for use when the inferior is 32-bit.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1987
1988#ifdef __x86_64__
1989
/* Emit the compiled-expression prologue: set up a frame, reserve
   scratch space, and save the two incoming arguments (%rdi, %rsi)
   at -8(%rbp) and -16(%rbp).  amd64_emit_reg assumes %rdi still
   holds the first (raw_regs) argument afterwards.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store the result (%rax) through the pointer
   saved at -16(%rbp), return 0, and tear down the frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
2012
/* Emit: add the popped next-to-top stack slot into %rax (the
   expression's top-of-stack register).  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax from the next-to-top slot and pop the result
   into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply and shifts are not implemented for amd64 compiled
   expressions: flag emit_error (presumably causing the caller to fall
   back on interpreting the agent expression — confirm in ax.c).  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2052
/* Emit code to sign-extend the low ARG bits of %rax to the full
   64 bits.  Widths other than 8/16/32 flag an emit error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
2077
/* Emit logical negation: %rax = (%rax == 0) ? 1 : 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: bitwise AND of %rax with the popped next-to-top slot.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise OR of %rax with the popped next-to-top slot.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise XOR of %rax with the popped next-to-top slot.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: bitwise complement of %rax (XOR with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
2117
/* Emit: %rax = (popped next-to-top slot == %rax) ? 1 : 0.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (popped slot < %rax), signed comparison.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (popped slot < %rax), unsigned comparison.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
2159
/* Emit code to load a SIZE-byte value from the address in %rax into
   the low SIZE bytes of %rax.  Unsupported sizes emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
2183
/* Emit a conditional branch taken when the popped top-of-stack value
   is nonzero, with a zeroed 4-byte displacement to be patched later
   by amd64_write_goto_address.  Reports the displacement's offset
   within the emitted bytes (*OFFSET_P) and its size (*SIZE_P).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jump with a zeroed 4-byte displacement to be
   patched later; same offset/size reporting as amd64_emit_if_goto.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2208
/* Patch a previously-emitted goto: FROM is the address of the
   displacement field (as reported via offset_p), SIZE its width.  The
   displacement is relative to the end of the field, i.e. the next
   instruction.  Only 4-byte displacements are supported; anything
   else flags an emit error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2224
/* Emit a movabs loading the 64-bit constant NUM into %rax.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2239
2240static void
2241amd64_emit_call (CORE_ADDR fn)
2242{
2243 unsigned char buf[16];
2244 int i;
2245 CORE_ADDR buildaddr;
4e29fb54 2246 LONGEST offset64;
6a271cae
PA
2247
2248 /* The destination function being in the shared library, may be
2249 >31-bits away off the compiled code pad. */
2250
2251 buildaddr = current_insn_ptr;
2252
2253 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2254
2255 i = 0;
2256
2257 if (offset64 > INT_MAX || offset64 < INT_MIN)
2258 {
2259 /* Offset is too large for a call. Use callq, but that requires
2260 a register, so avoid it if possible. Use r10, since it is
2261 call-clobbered, we don't have to push/pop it. */
2262 buf[i++] = 0x48; /* mov $fn,%r10 */
2263 buf[i++] = 0xba;
2264 memcpy (buf + i, &fn, 8);
2265 i += 8;
2266 buf[i++] = 0xff; /* callq *%r10 */
2267 buf[i++] = 0xd2;
2268 }
2269 else
2270 {
2271 int offset32 = offset64; /* we know we can't overflow here. */
2272 memcpy (buf + i, &offset32, 4);
2273 i += 4;
2274 }
2275
2276 append_insns (&buildaddr, i, buf);
2277 current_insn_ptr = buildaddr;
2278}
2279
/* Emit code to push the value of raw register REG: REG is loaded into
   %esi (second argument) and the get-raw-register helper is called.
   The raw register block pointer is assumed to still be live in %rdi
   (first argument).  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
2297
/* Emit a pop: %rax caches the top of the value stack, so reloading it
   from the memory stack both discards the old top and exposes the
   next entry.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit code to spill the cached top-of-stack (%rax) onto the memory
   stack, making room for a new top value.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
2311
/* Emit code to zero-extend the low ARG bits of the top of stack
   (%rax), clearing everything above them.  Unsupported widths flag an
   error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      /* `and $0xffffffff,%rax' won't do: a 32-bit immediate is
	 sign-extended to 64 bits, making that a no-op.  Go through
	 %rcx instead.  */
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit code to exchange the top two stack entries: %rax (cached top)
   and the entry below it on the memory stack.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
2343
/* Emit code to drop N entries (8 bytes each) from the value stack.
   lea is used instead of add so the flags are untouched.  Only an
   8-bit displacement is emitted, which bounds the adjustment.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2361
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Emit a call to FN passing the constant ARG1 in %edi (first integer
   argument).  FN's return value is left in %rax, i.e. it becomes the
   new cached top of stack.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Emit a call to FN passing ARG1 in %edi and the current top of stack
   (%rax) in %rsi; the top of stack is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2407
6b9801d4
SS
/* The six conditional-branch emitters below share one shape: compare
   the next stack entry (at (%rsp)) against the cached top (%rax),
   fall through when the condition fails, and otherwise pop both
   operands and take an e9 jump whose 32-bit displacement is emitted
   as zero and patched later.  *OFFSET_P receives the byte offset of
   that displacement within the sequence (13 here) and *SIZE_P its
   width.  */

/* Branch if the top two stack entries are equal.  */

void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if the top two stack entries are not equal.  */

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry < top, signed.  */

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry <= top, signed.  */

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry > top, signed.  */

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry >= top, signed.  (The .Lamd64_ge_jump label is
   unreferenced in this sequence; labels emit no bytes, so the offset
   is unaffected.)  */

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2528
6a271cae
PA
/* The emit_ops vtable used to compile agent bytecode for 64-bit
   (amd64) inferiors.  Slot order must match struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2569
2570#endif /* __x86_64__ */
2571
/* Emit the i386 compiled-expression prologue: set up a frame and save
   %ebx, which together with %eax caches the 64-bit top of stack
   throughout the compiled code.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2582
/* Emit the epilogue: store the 64-bit result (%eax low, %ebx high)
   through the value pointer passed at 12(%ebp), return zero, and tear
   down the frame set up by the prologue.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2595
/* Emit a 64-bit add of the top two stack entries: low halves with
   add, high halves with adc to propagate the carry, result left in
   %eax:%ebx, memory entry dropped.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit a 64-bit subtract: computes next-entry minus top (note the
   operand order of subl/sbbl writes into the memory entry), then pops
   the result into %eax:%ebx.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2614
/* 64-bit multiply and shifts are not implemented for i386; each stub
   signals failure to the caller by setting emit_error.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2638
/* Emit code to sign-extend the low ARG bits of the top of stack to
   the full 64-bit %eax:%ebx pair: widen within %eax (cbtw/cwtl as
   needed), then replicate the sign bit into %ebx with an arithmetic
   shift.  Unsupported widths flag an error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2666
/* Emit logical-not of the 64-bit top of stack: fold %ebx into %eax,
   test for zero, and produce 1 (was zero) or 0 (was non-zero) in
   %eax, clearing %ebx.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

/* Emit 64-bit bitwise AND of the top two stack entries; result in
   %eax:%ebx, memory entry dropped.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise OR, same stack discipline as AND.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise XOR, same stack discipline as AND.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise NOT of the top of stack (both halves flipped
   in place).  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2712
/* Emit a 64-bit equality test of the top two stack entries: compare
   high halves first, then low halves; leave the boolean result in
   %eax with %ebx cleared, and drop the memory entry.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit a 64-bit signed less-than test (next-entry < top): decide on
   the high halves when they differ (jl/jne), otherwise compare the
   low halves unsigned-style via jl on the full compare chain.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit a 64-bit unsigned less-than test, same shape as the signed
   variant but using jb (below) throughout.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2768
/* Emit code to dereference the address in %eax, replacing the top of
   stack with the SIZE-byte value it points to.  The 8-byte case loads
   the high word into %ebx first, since the low-word load overwrites
   the address in %eax.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2793
/* Emit a conditional branch taken when the 64-bit top of stack
   (%eax:%ebx) is non-zero, popping it.  The jne is emitted as raw
   bytes with a zero displacement; *OFFSET_P/*SIZE_P locate the 32-bit
   displacement field for later patching by i386_write_goto_address.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jump with a zero 32-bit displacement to be
   patched later; *OFFSET_P/*SIZE_P locate the displacement field.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2823
2824static void
2825i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2826{
2827 int diff = (to - (from + size));
2828 unsigned char buf[sizeof (int)];
2829
2830 /* We're only doing 4-byte sizes at the moment. */
2831 if (size != 4)
2832 {
2833 emit_error = 1;
2834 return;
2835 }
2836
2837 memcpy (buf, &diff, sizeof (int));
2838 write_inferior_memory (from, buf, sizeof (int));
2839}
2840
2841static void
4e29fb54 2842i386_emit_const (LONGEST num)
6a271cae
PA
2843{
2844 unsigned char buf[16];
b00ad6ff 2845 int i, hi, lo;
6a271cae
PA
2846 CORE_ADDR buildaddr = current_insn_ptr;
2847
2848 i = 0;
2849 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2850 lo = num & 0xffffffff;
2851 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2852 i += 4;
2853 hi = ((num >> 32) & 0xffffffff);
2854 if (hi)
2855 {
2856 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2857 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2858 i += 4;
2859 }
2860 else
2861 {
2862 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2863 }
2864 append_insns (&buildaddr, i, buf);
2865 current_insn_ptr = buildaddr;
2866}
2867
2868static void
2869i386_emit_call (CORE_ADDR fn)
2870{
2871 unsigned char buf[16];
2872 int i, offset;
2873 CORE_ADDR buildaddr;
2874
2875 buildaddr = current_insn_ptr;
2876 i = 0;
2877 buf[i++] = 0xe8; /* call <reladdr> */
2878 offset = ((int) fn) - (buildaddr + 5);
2879 memcpy (buf + 1, &offset, 4);
2880 append_insns (&buildaddr, 5, buf);
2881 current_insn_ptr = buildaddr;
2882}
2883
/* Emit code to push the value of raw register REG: reserve argument
   space, pass REG and the raw register block address (from 8(%ebp),
   see the prologue) on the stack, call the get-raw-register helper,
   then clear %ebx and drop the argument space.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2909
/* Emit a pop: reload the %eax:%ebx top-of-stack cache from the next
   64-bit entry on the memory stack, discarding the old top.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

/* Emit code to spill the cached top of stack (%eax:%ebx) onto the
   memory stack, high word pushed first.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2925
/* Emit code to zero-extend the low ARG bits of the 64-bit top of
   stack: mask %eax as needed and clear the high word %ebx.
   Unsupported widths flag an error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit code to exchange the top two 64-bit stack entries: the
   %eax:%ebx cache and the pair below it on the memory stack.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2961
2962static void
2963i386_emit_stack_adjust (int n)
2964{
2965 unsigned char buf[16];
2966 int i;
2967 CORE_ADDR buildaddr = current_insn_ptr;
2968
2969 i = 0;
2970 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2971 buf[i++] = 0x64;
2972 buf[i++] = 0x24;
2973 buf[i++] = n * 8;
2974 append_insns (&buildaddr, i, buf);
2975 current_insn_ptr = buildaddr;
2976}
2977
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Emit a call to FN passing the constant ARG1 on the stack (cdecl).
   The 64-bit return value comes back in %edx:%eax; the high half is
   moved into %ebx so it lands in the top-of-stack cache.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Emit a call to FN passing ARG1 plus the current 64-bit top of stack
   (%eax:%ebx) as stack arguments; the top of stack is preserved
   across the call via the saved %eax/%ebx copies.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
3041
6b9801d4
SS
3042
/* The six i386 conditional-branch emitters below compare the 64-bit
   next-entry ((%esp):4(%esp)) against the cached top (%eax:%ebx),
   fall through when the condition fails, and otherwise pop both
   operands and take an e9 jump whose 32-bit displacement is emitted
   as zero and patched later.  *OFFSET_P receives the byte offset of
   the displacement within the sequence and *SIZE_P its width.  */

/* Branch if the top two stack entries are equal.  */

void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

/* Branch if the top two stack entries are not equal.  */

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry < top, signed: decide on the high halves when
   they differ, otherwise on the low halves.  */

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry <= top, signed.  */

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry > top, signed.  */

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

/* Branch if next-entry >= top, signed.  */

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
3197
6a271cae
PA
/* The emit_ops vtable used to compile agent bytecode for 32-bit
   (i386) inferiors.  Slot order must match struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
3238
3239
/* Return the emit_ops vtable matching the current inferior: the amd64
   ops when built for x86-64 and the target description is 64-bit, the
   i386 ops otherwise.  */

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
3250
c2d6af84
PA
/* Range stepping is unconditionally supported on this target.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3256
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

/* NOTE(review): the positional initializers below must stay in the
   exact slot order of struct linux_target_ops (linux-low.h); the bare
   NULL/1 entries fill slots this target doesn't need -- verify
   against the struct when adding fields.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};
3aee8918
PA
3297
/* Register every i386/amd64 (and, when built 64-bit, x32) target
   description this file can serve, build the no-XML fallback
   descriptions, and initialize the regset bookkeeping.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  /* The "no_xml" variant is a mutable copy of the plain description
     with a canned xmltarget string substituted in.  */
  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  /* Same no-XML fallback for the 32-bit description.  */
  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 0.635174 seconds and 4 git commands to generate.