/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
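
/* The leading '@' seems to tell the qXfer machinery that the string
   is the XML document itself rather than the name of a file to read
   it from -- an assumption based on how these xmltarget strings are
   consumed elsewhere, not spelled out here.  */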

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL      30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

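/* With these constants available, the FS/GS base of a stopped thread
   can be read even on kernels whose headers lack the definitions,
   e.g.:

     void *base;
     if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
       ...

   which is exactly how ps_get_thread_area and x86_get_thread_area
   below use them.  */
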
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8,  22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
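
/* Register 0 is rax/eax in both register maps above, so its size in
   the process's tdesc (8 vs 4 bytes) is what distinguishes a 64-bit
   from a 32-bit inferior here.  */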

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
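
/* In the PTRACE_GET_THREAD_AREA branch above, DESC receives what is
   presumably a struct user_desc, whose second 32-bit word (desc[1])
   holds the segment's base address -- hence the desc[1] reads here
   and in x86_get_thread_area below.  */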

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
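
/* The "gs >> 3" above relies on the layout of an x86 segment
   selector: the low two bits are the requested privilege level and
   bit 2 selects the descriptor table, so shifting right by 3 leaves
   the descriptor index that PTRACE_GET_THREAD_AREA expects.  */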


\f
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
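	/* Without fs_base/gs_base members in user_regs_struct, the
	   collected base values can't travel in the gregset buffer;
	   poke them into the inferior directly via PTRACE_ARCH_PRCTL
	   instead.  */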
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
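
/* NULL_REGSET above is the table's end marker; x86_linux_read_description
   below walks x86_regsets until it reaches an entry whose
   fill_function is NULL, resizing the PTRACE_GETREGSET entry on the
   way.  */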

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

\f
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
\f
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
\f
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
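
/* As a concrete illustration of why the fixup is needed: both layouts
   start with the int fields si_signo/si_errno/si_code, but the union
   that follows holds pointer-sized members (e.g. si_addr), which are
   8 bytes wide in the 64-bit object and 4 bytes wide in the 32-bit
   one, so the object must be converted field by field rather than
   copied whole; amd64_linux_siginfo_fixup_common (in
   nat/amd64-linux-siginfo.c) does that per-field copy.  */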
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes [464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
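
/* Given a raw XSAVE buffer fetched via PTRACE_GETREGSET with
   NT_X86_XSTATE, the enabled-feature mask can thus be read as, for
   example:

     xcr0 = xsave_buf[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   (with xsave_buf declared as an array of uint64_t), which is what
   x86_linux_read_description below does with its xstateregs array.  */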

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}
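
/* For example, a qSupported packet carrying "xmlRegisters=i386"
   (possibly with other architecture names in the comma-separated
   list) lands here as one FEATURES entry and flips use_xml on.  */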

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the number of the syscall that was just trapped.
   This should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
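
/* Typical use, as in the jump pad builders below:

     i += push_opcode (&buf[i], "48 89 e6");

   appends the encoding of "mov %rsp,%rsi" to BUF and advances I by
   the number of bytes emitted.  */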

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
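
/* In outline, the amd64 pad built above is:

     push all GPRs, eflags and TPADDR         (the register block)
     spin until the lock at LOCKADDR is taken (lock cmpxchg)
     call gdb_collect (TPOINT, regs)
     release the lock, pop the registers
     <relocated copy of the original instruction>
     jmp back to TPADDR + ORIG_SIZE

   The i386 variant below follows the same shape with 32-bit
   encodings.  */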

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
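
/* For instance, EMIT_ASM (amd64_add, ...) below copies the bytes the
   compiler assembled for that asm block into the jump pad at
   current_insn_ptr via add_insns; the leading jmp makes the host
   function skip over the embedded code, so it is never executed in
   gdbserver itself.  */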

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
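
/* The .code32/.code64 bracketing makes the assembler encode the block
   as 32-bit code even inside a 64-bit gdbserver, since these bytes
   will ultimately run in a 32-bit inferior.  */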

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */
1885
1886 buildaddr = current_insn_ptr;
1887
1888 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1889
1890 i = 0;
1891
1892 if (offset64 > INT_MAX || offset64 < INT_MIN)
1893 {
1894 /* Offset is too large for a call. Use callq, but that requires
1895 a register, so avoid it if possible. Use r10, since it is
1896 call-clobbered, we don't have to push/pop it. */
1897 buf[i++] = 0x48; /* mov $fn,%r10 */
1898 buf[i++] = 0xba;
1899 memcpy (buf + i, &fn, 8);
1900 i += 8;
1901 buf[i++] = 0xff; /* callq *%r10 */
1902 buf[i++] = 0xd2;
1903 }
1904 else
1905 {
1906 int offset32 = offset64; /* we know we can't overflow here. */
ed036b40
PA
1907
1908 buf[i++] = 0xe8; /* call <reladdr> */
6a271cae
PA
1909 memcpy (buf + i, &offset32, 4);
1910 i += 4;
1911 }
1912
1913 append_insns (&buildaddr, i, buf);
1914 current_insn_ptr = buildaddr;
1915}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
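
/* The sequence above leans on the System V AMD64 calling convention:
   the register-fetch helper behind get_raw_reg_func_addr () receives
   the raw register block (still in %rdi, the first argument) and the
   register number (%esi, the second), and its return value arrives
   in %rax, which is exactly the bytecode stack-top cache.  */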

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}
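
/* "Flush" spills the cached top (%rax) to the memory stack before a
   new value is computed into %rax; "pop" reloads the cache from
   memory; "swap" exchanges the cached top with the next entry.
   Together they maintain the invariant that %rax always mirrors the
   logical top of stack.  */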

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* The displacement is a single signed byte, so this only handles
     small adjustments, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
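
/* N counts 64-bit stack slots, not bytes: amd64_emit_stack_adjust (2)
   assembles to "lea 0x10(%rsp),%rsp", discarding two entries while
   leaving the flags untouched.  */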

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}
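
/* A typical use (hypothetical helper name, for illustration): the
   bytecode compiler can emit amd64_emit_void_call_2 (record_fn, n)
   to call `record_fn (n, top)' with the current stack top as the
   second argument, while the push/pop pair keeps that top value live
   across the call.  */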

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
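
/* For these fused compare-and-branch emitters, the displacement
   offset of 13 is: cmp %rax,(%rsp) (4 bytes) + a 2-byte Jcc rel8 +
   lea 0x8(%rsp),%rsp (5) + pop %rax (1) + the 0xe9 opcode (1).
   Note that both the taken and fall-through paths discard both
   operands.  */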

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
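
/* On i386 each 64-bit bytecode value is split across a register
   pair: %eax caches the low half of the stack top and %ebx the high
   half, and a spilled entry occupies two 4-byte slots with the low
   word at the lower address.  That is why the emitters below operate
   on both registers and adjust %esp in steps of 8.  */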

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
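
/* 64-bit multiply and shifts are left unimplemented on i386; setting
   emit_error makes compilation of the enclosing expression fail, and
   the agent then falls back to interpreting the bytecode rather than
   running compiled code.  */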

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              /* High halves equal; the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* Be sure that this matches the sequence above.  */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
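
/* For i386_emit_if_goto the 11-byte offset breaks down as:
   mov %eax,%ecx (2 bytes) + or %ebx,%ecx (2) + pop %eax (1) +
   pop %ebx (1) + cmpl $0,%ecx (3) + the 2-byte 0x0f 0x85 jne
   opcode.  */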

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
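
/* For instance, i386_emit_const (0x100000002) assembles to
   "mov $0x2,%eax; mov $0x1,%ebx", while any constant whose high half
   is zero takes the shorter "xor %ebx,%ebx" form.  */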

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
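
/* Unlike the amd64 variant, a single rel32 call always suffices
   here: in a 32-bit address space every destination is reachable
   from the 5-byte 0xe8 form, so no indirect-call fallback is
   needed.  */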

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
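
/* The callee returns its LONGEST in %edx:%eax per the i386 ABI;
   copying %edx into %ebx converts that into the compiled code's
   top-of-stack register pair.  */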

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              /* High halves equal; the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jnb .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              /* High halves equal; the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jnbe .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              /* High halves equal; the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jna .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              /* High halves equal; the low halves compare unsigned.  */
              "cmpl %eax,(%esp)\n\t"
              "jb .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
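
/* The 20-byte displacement offset shared by the four emitters above
   is: cmpl %ebx,4(%esp) (4 bytes) + two 2-byte Jcc rel8 branches +
   cmpl %eax,(%esp) (3) + another 2-byte Jcc + lea 0x8(%esp),%esp (4)
   + pop %eax (1) + pop %ebx (1) + the 0xe9 opcode (1).  The high
   halves carry the sign and compare signed; once they are equal the
   low halves are pure magnitude and compare unsigned.  */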

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
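
/* A rough sketch of how these ops are consumed (the call sequence is
   illustrative, not lifted from the tracepoint compiler): to compile
   the agent expression "reg 0 < 42" one would do roughly
   ops->emit_prologue (); ops->emit_reg (0); ops->emit_stack_flush ();
   ops->emit_const (42); ops->emit_less_signed ();
   ops->emit_epilogue (); with forward branches later resolved
   through ops->write_goto_address.  */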

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
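
/* Presumably the hooks still listed in the_low_target are the ones
   not yet converted into linux_process_target methods at this point
   in the C++ification series; sw_breakpoint_from_kind above has
   already made the move.  */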

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

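/* The *_no_xml descriptions give a GDB built without XML support a
   fixed SSE-only register layout; richer register sets (AVX and
   beyond) are only described via the XML target-description path.  */
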
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}