/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "common/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "common/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                              /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

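/* Return nonzero if register REGNO cannot be stored for the current
   inferior.  With a 64-bit tdesc every register is writable; with a
   32-bit one, only the I386_NUM_REGS user registers exist.  */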
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

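/* Likewise, return nonzero if register REGNO cannot be fetched for
   the current inferior.  */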
static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

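/* Copy the general-purpose registers from REGCACHE into the ptrace
   register buffer BUF, using the regmap matching the inferior's word
   size.  */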
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        collect_register_by_name (regcache, "fs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

        collect_register_by_name (regcache, "gs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);

  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
}

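/* The inverse of x86_fill_gregset: supply the general-purpose
   registers from the ptrace buffer BUF back into REGCACHE.  */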
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
          supply_register_by_name (regcache, "fs_base", &base);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
          supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

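/* Transfer the floating-point registers between REGCACHE and BUF,
   using the fxsave layout on amd64 and the fsave layout on i386.  */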
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

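/* Transfer the x87/SSE/extended state between REGCACHE and the XSAVE
   buffer BUF.  */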
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

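/* Return the program counter from REGCACHE, reading rip or eip
   depending on the inferior's word size.  */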
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

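/* Set the program counter in REGCACHE to PC, writing rip or eip as
   appropriate.  */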
static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

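/* Return nonzero if an int3 breakpoint instruction (0xCC) is present
   at PC.  */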
static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

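/* Return nonzero if GDBserver can handle Z packets of type Z_TYPE on
   this target.  */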
static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

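/* Insert a hardware breakpoint or watchpoint of type TYPE at ADDR,
   covering SIZE bytes, by updating the debug register mirror.
   Return 0 on success, nonzero on failure or if the type is
   unsupported here.  */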
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

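/* Remove a hardware breakpoint or watchpoint previously inserted by
   x86_insert_point.  Return 0 on success, nonzero otherwise.  */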
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

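/* Return nonzero if the current thread stopped because a watchpoint
   triggered, per the debug register status.  */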
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

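/* Return the data address that triggered the last watchpoint hit, or
   0 if it cannot be determined.  */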
static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        {
          tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                                !is_elf64);
        }

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support xml target
   descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    the_low_target.arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);
          char *p;

          for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

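/* Return the regs_info matching the current inferior: the purely
   regset-based amd64 flavor for a 64-bit tdesc, otherwise the i386
   flavor with usrregs.  */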
const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

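/* Tracepoints (including fast tracepoints) are supported on x86.  */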
static int
x86_supports_tracepoints (void)
{
  return 1;
}

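/* Write the LEN bytes at BUF into the inferior at *TO, advancing *TO
   past the written bytes.  */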
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

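/* Parse OP, a string of whitespace-separated hex byte values such as
   "48 83 ec 18", into BUF; return the number of bytes written.  */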
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

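/* Dispatch to the jump pad builder matching the inferior: the amd64
   variant for a 64-bit tdesc, the i386 variant otherwise.  */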
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

PA
1496static void
1497add_insns (unsigned char *start, int len)
1498{
1499 CORE_ADDR buildaddr = current_insn_ptr;
1500
1501 if (debug_threads)
87ce2a04
DE
1502 debug_printf ("Adding %d bytes of insn at %s\n",
1503 len, paddress (buildaddr));
6a271cae
PA
1504
1505 append_insns (&buildaddr, len, start);
1506 current_insn_ptr = buildaddr;
1507}
1508
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

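/* For example, EMIT_ASM (amd64_pop, "pop %rax") assembles the "pop
   %rax" bytes in place between the start_amd64_pop and end_amd64_pop
   labels, then copies those bytes into the jump pad at
   current_insn_ptr; the leading jmp ensures the emitting function
   itself never executes the embedded instructions.  */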
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

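/* Multiply and shift operations have no compiled implementation here;
   setting emit_error signals the caller to abandon compiling this
   agent expression and interpret it instead.  */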
1588static void
1589amd64_emit_mul (void)
1590{
1591 emit_error = 1;
1592}
1593
1594static void
1595amd64_emit_lsh (void)
1596{
1597 emit_error = 1;
1598}
1599
1600static void
1601amd64_emit_rsh_signed (void)
1602{
1603 emit_error = 1;
1604}
1605
1606static void
1607amd64_emit_rsh_unsigned (void)
1608{
1609 emit_error = 1;
1610}
1611
1612static void
1613amd64_emit_ext (int arg)
1614{
1615 switch (arg)
1616 {
1617 case 8:
1618 EMIT_ASM (amd64_ext_8,
1619 "cbtw\n\t"
1620 "cwtl\n\t"
1621 "cltq");
1622 break;
1623 case 16:
1624 EMIT_ASM (amd64_ext_16,
1625 "cwtl\n\t"
1626 "cltq");
1627 break;
1628 case 32:
1629 EMIT_ASM (amd64_ext_32,
1630 "cltq");
1631 break;
1632 default:
1633 emit_error = 1;
1634 }
1635}
1636
1637static void
1638amd64_emit_log_not (void)
1639{
1640 EMIT_ASM (amd64_log_not,
1641 "test %rax,%rax\n\t"
1642 "sete %cl\n\t"
1643 "movzbq %cl,%rax");
1644}
1645
1646static void
1647amd64_emit_bit_and (void)
1648{
1649 EMIT_ASM (amd64_and,
1650 "and (%rsp),%rax\n\t"
1651 "lea 0x8(%rsp),%rsp");
1652}
1653
1654static void
1655amd64_emit_bit_or (void)
1656{
1657 EMIT_ASM (amd64_or,
1658 "or (%rsp),%rax\n\t"
1659 "lea 0x8(%rsp),%rsp");
1660}
1661
1662static void
1663amd64_emit_bit_xor (void)
1664{
1665 EMIT_ASM (amd64_xor,
1666 "xor (%rsp),%rax\n\t"
1667 "lea 0x8(%rsp),%rsp");
1668}
1669
1670static void
1671amd64_emit_bit_not (void)
1672{
1673 EMIT_ASM (amd64_bit_not,
1674 "xorq $0xffffffffffffffff,%rax");
1675}
1676
1677static void
1678amd64_emit_equal (void)
1679{
1680 EMIT_ASM (amd64_equal,
1681 "cmp %rax,(%rsp)\n\t"
1682 "je .Lamd64_equal_true\n\t"
1683 "xor %rax,%rax\n\t"
1684 "jmp .Lamd64_equal_end\n\t"
1685 ".Lamd64_equal_true:\n\t"
1686 "mov $0x1,%rax\n\t"
1687 ".Lamd64_equal_end:\n\t"
1688 "lea 0x8(%rsp),%rsp");
1689}
1690
1691static void
1692amd64_emit_less_signed (void)
1693{
1694 EMIT_ASM (amd64_less_signed,
1695 "cmp %rax,(%rsp)\n\t"
1696 "jl .Lamd64_less_signed_true\n\t"
1697 "xor %rax,%rax\n\t"
1698 "jmp .Lamd64_less_signed_end\n\t"
1699 ".Lamd64_less_signed_true:\n\t"
1700 "mov $1,%rax\n\t"
1701 ".Lamd64_less_signed_end:\n\t"
1702 "lea 0x8(%rsp),%rsp");
1703}
1704
1705static void
1706amd64_emit_less_unsigned (void)
1707{
1708 EMIT_ASM (amd64_less_unsigned,
1709 "cmp %rax,(%rsp)\n\t"
1710 "jb .Lamd64_less_unsigned_true\n\t"
1711 "xor %rax,%rax\n\t"
1712 "jmp .Lamd64_less_unsigned_end\n\t"
1713 ".Lamd64_less_unsigned_true:\n\t"
1714 "mov $1,%rax\n\t"
1715 ".Lamd64_less_unsigned_end:\n\t"
1716 "lea 0x8(%rsp),%rsp");
1717}
1718
1719static void
1720amd64_emit_ref (int size)
1721{
1722 switch (size)
1723 {
1724 case 1:
1725 EMIT_ASM (amd64_ref1,
1726 "movb (%rax),%al");
1727 break;
1728 case 2:
1729 EMIT_ASM (amd64_ref2,
1730 "movw (%rax),%ax");
1731 break;
1732 case 4:
1733 EMIT_ASM (amd64_ref4,
1734 "movl (%rax),%eax");
1735 break;
1736 case 8:
1737 EMIT_ASM (amd64_ref8,
1738 "movq (%rax),%rax");
1739 break;
1740 }
1741}
1742
1743static void
1744amd64_emit_if_goto (int *offset_p, int *size_p)
1745{
1746 EMIT_ASM (amd64_if_goto,
1747 "mov %rax,%rcx\n\t"
1748 "pop %rax\n\t"
1749 "cmp $0,%rcx\n\t"
1750 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1751 if (offset_p)
1752 *offset_p = 10;
1753 if (size_p)
1754 *size_p = 4;
1755}
1756
1757static void
1758amd64_emit_goto (int *offset_p, int *size_p)
1759{
1760 EMIT_ASM (amd64_goto,
1761 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1762 if (offset_p)
1763 *offset_p = 1;
1764 if (size_p)
1765 *size_p = 4;
1766}
1767
1768static void
1769amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1770{
1771 int diff = (to - (from + size));
1772 unsigned char buf[sizeof (int)];
1773
1774 if (size != 4)
1775 {
1776 emit_error = 1;
1777 return;
1778 }
1779
1780 memcpy (buf, &diff, sizeof (int));
1781 write_inferior_memory (from, buf, sizeof (int));
1782}
1783
1784static void
4e29fb54 1785amd64_emit_const (LONGEST num)
6a271cae
PA
1786{
1787 unsigned char buf[16];
1788 int i;
1789 CORE_ADDR buildaddr = current_insn_ptr;
1790
1791 i = 0;
1792 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 1793 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
1794 i += 8;
1795 append_insns (&buildaddr, i, buf);
1796 current_insn_ptr = buildaddr;
1797}
1798
1799static void
1800amd64_emit_call (CORE_ADDR fn)
1801{
1802 unsigned char buf[16];
1803 int i;
1804 CORE_ADDR buildaddr;
4e29fb54 1805 LONGEST offset64;
6a271cae
PA
1806
1807 /* The destination function being in the shared library, may be
1808 >31-bits away off the compiled code pad. */
1809
1810 buildaddr = current_insn_ptr;
1811
1812 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1813
1814 i = 0;
1815
1816 if (offset64 > INT_MAX || offset64 < INT_MIN)
1817 {
1818 /* Offset is too large for a call. Use callq, but that requires
1819 a register, so avoid it if possible. Use r10, since it is
1820 call-clobbered, we don't have to push/pop it. */
1821 buf[i++] = 0x48; /* mov $fn,%r10 */
1822 buf[i++] = 0xba;
1823 memcpy (buf + i, &fn, 8);
1824 i += 8;
1825 buf[i++] = 0xff; /* callq *%r10 */
1826 buf[i++] = 0xd2;
1827 }
1828 else
1829 {
1830 int offset32 = offset64; /* we know we can't overflow here. */
ed036b40
PA
1831
1832 buf[i++] = 0xe8; /* call <reladdr> */
6a271cae
PA
1833 memcpy (buf + i, &offset32, 4);
1834 i += 4;
1835 }
1836
1837 append_insns (&buildaddr, i, buf);
1838 current_insn_ptr = buildaddr;
1839}
1840
1841static void
1842amd64_emit_reg (int reg)
1843{
1844 unsigned char buf[16];
1845 int i;
1846 CORE_ADDR buildaddr;
1847
1848 /* Assume raw_regs is still in %rdi. */
1849 buildaddr = current_insn_ptr;
1850 i = 0;
1851 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1852 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1853 i += 4;
1854 append_insns (&buildaddr, i, buf);
1855 current_insn_ptr = buildaddr;
1856 amd64_emit_call (get_raw_reg_func_addr ());
1857}
1858
1859static void
1860amd64_emit_pop (void)
1861{
1862 EMIT_ASM (amd64_pop,
1863 "pop %rax");
1864}
1865
1866static void
1867amd64_emit_stack_flush (void)
1868{
1869 EMIT_ASM (amd64_stack_flush,
1870 "push %rax");
1871}
1872
1873static void
1874amd64_emit_zero_ext (int arg)
1875{
1876 switch (arg)
1877 {
1878 case 8:
1879 EMIT_ASM (amd64_zero_ext_8,
1880 "and $0xff,%rax");
1881 break;
1882 case 16:
1883 EMIT_ASM (amd64_zero_ext_16,
1884 "and $0xffff,%rax");
1885 break;
1886 case 32:
1887 EMIT_ASM (amd64_zero_ext_32,
1888 "mov $0xffffffff,%rcx\n\t"
1889 "and %rcx,%rax");
1890 break;
1891 default:
1892 emit_error = 1;
1893 }
1894}
1895
1896static void
1897amd64_emit_swap (void)
1898{
1899 EMIT_ASM (amd64_swap,
1900 "mov %rax,%rcx\n\t"
1901 "pop %rax\n\t"
1902 "push %rcx");
1903}
1904
1905static void
1906amd64_emit_stack_adjust (int n)
1907{
1908 unsigned char buf[16];
1909 int i;
1910 CORE_ADDR buildaddr = current_insn_ptr;
1911
1912 i = 0;
1913 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1914 buf[i++] = 0x8d;
1915 buf[i++] = 0x64;
1916 buf[i++] = 0x24;
1917 /* This only handles adjustments up to 16, but we don't expect any more. */
1918 buf[i++] = n * 8;
1919 append_insns (&buildaddr, i, buf);
1920 current_insn_ptr = buildaddr;
1921}
1922
1923/* FN's prototype is `LONGEST(*fn)(int)'. */
1924
1925static void
1926amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1927{
1928 unsigned char buf[16];
1929 int i;
1930 CORE_ADDR buildaddr;
1931
1932 buildaddr = current_insn_ptr;
1933 i = 0;
1934 buf[i++] = 0xbf; /* movl $<n>,%edi */
b00ad6ff 1935 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
1936 i += 4;
1937 append_insns (&buildaddr, i, buf);
1938 current_insn_ptr = buildaddr;
1939 amd64_emit_call (fn);
1940}
1941
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf;	/* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

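/* The six conditional-goto emitters below share one pattern: compare
   the next-on-stack entry with the cached top in %rax, discard both
   stack entries on either path, and end the taken path with a
   hand-encoded "jmp rel32" (0xe9) whose displacement is patched later
   through amd64_write_goto_address; *OFFSET_P reports where the
   32-bit displacement starts within the sequence and *SIZE_P its
   width.  */
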
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

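/* In the 32-bit emitters that follow, a 64-bit bytecode value is kept
   split across a register pair (low word in %eax, high word in %ebx),
   and each value on the hardware stack occupies two 4-byte slots,
   with the low word at the lower address.  */
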
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

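/* The 64-bit multiply and shift operators are not implemented here;
   setting emit_error makes bytecode-to-native compilation give up, so
   such expressions are presumably left to the agent bytecode
   interpreter instead.  */
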
static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

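/* Comparing 64-bit values on a 32-bit machine goes by halves: the
   high words decide unless they are equal, in which case the low
   words decide as an unsigned comparison, since only the high half
   carries the sign.  */
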
static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low halves compare unsigned.  */
	      "jb .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

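/* FROM, as reported by the goto emitters via *OFFSET_P, is the
   address of the 32-bit displacement field itself; that field ends
   the jump instruction, so FROM + SIZE is the address the
   displacement is computed against.  */
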
static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8;	/* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb;	/* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb;	/* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8;	/* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

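/* Fetch raw register REG into the %eax/%ebx pair.  Arguments are
   passed on the stack here (cdecl): the raw-register block address
   that the prologue left at 8(%ebp) goes in the first slot and REG in
   the second, then the get_raw_reg helper is called and its result
   zero-extended into the pair.  */
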
static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8;	/* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d;	/* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

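/* In the call helper below, the LONGEST result comes back in the
   %eax:%edx register pair per the 32-bit calling convention; the
   trailing asm moves the high half from %edx into %ebx to restore
   this file's %eax/%ebx value-pair convention.  */
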
/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;	/* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;	/* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

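/* As on amd64, the conditional gotos below end in a hand-encoded
   rel32 jump patched later through i386_write_goto_address, with
   *OFFSET_P locating the 32-bit displacement.  The comparisons go by
   halves in the usual way, with the low words compared unsigned.  */
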
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low halves compare unsigned.  */
	      "jnb .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      /* Equal high halves must fall to the low-half compare.  */
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low halves compare unsigned.  */
	      "jnbe .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low halves compare unsigned.  */
	      "jna .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      /* Equal high halves must fall to the low-half compare.  */
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      /* The low halves compare unsigned.  */
	      "jb .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

#if GDB_SELF_TEST
  initialize_low_tdesc ();
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}