gdbserver/linux-low: turn process/thread addition/deletion ops into methods
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
50#ifdef __x86_64__
51static struct target_desc *tdesc_amd64_linux_no_xml;
52#endif
53static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
59/* Backward compatibility for gdb without XML support. */
60
61static const char *xmltarget_i386_linux_no_xml = "@<target>\
62<architecture>i386</architecture>\
63<osabi>GNU/Linux</osabi>\
64</target>";
f6d1620c
L
65
66#ifdef __x86_64__
1570b33e
L
67static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68<architecture>i386:x86-64</architecture>\
69<osabi>GNU/Linux</osabi>\
70</target>";
f6d1620c 71#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
ef0478f6
TBA
95/* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99class x86_target : public linux_process_target
100{
101public:
102
797bcff5
TBA
103 /* Update all the target description of all processes; a new GDB
104 connected, and it may or not support xml target descriptions. */
105 void update_xmltarget ();
106
aa8d21c9
TBA
107 const regs_info *get_regs_info () override;
108
3ca4edb6
TBA
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
007c9b97
TBA
111 bool supports_z_point_type (char z_type) override;
112
797bcff5
TBA
113protected:
114
115 void low_arch_setup () override;
daca57a7
TBA
116
117 bool low_cannot_fetch_register (int regno) override;
118
119 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
120
121 bool low_supports_breakpoints () override;
122
123 CORE_ADDR low_get_pc (regcache *regcache) override;
124
125 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
d4807ea2
TBA
126
127 int low_decr_pc_after_break () override;
d7146cda
TBA
128
129 bool low_breakpoint_at (CORE_ADDR pc) override;
9db9aa23
TBA
130
131 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
132 int size, raw_breakpoint *bp) override;
133
134 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
135 int size, raw_breakpoint *bp) override;
ac1bbaca
TBA
136
137 bool low_stopped_by_watchpoint () override;
138
139 CORE_ADDR low_stopped_data_address () override;
b35db733
TBA
140
141 /* collect_ptrace_register/supply_ptrace_register are not needed in the
142 native i386 case (no registers smaller than an xfer unit), and are not
143 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
cb63de7c
TBA
144
145 /* Need to fix up i386 siginfo if host is amd64. */
146 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
147 int direction) override;
fd000fb3
TBA
148
149 arch_process_info *low_new_process () override;
150
151 void low_delete_process (arch_process_info *info) override;
152
153 void low_new_thread (lwp_info *) override;
154
155 void low_delete_thread (arch_lwp_info *) override;
156
157 void low_new_fork (process_info *parent, process_info *child) override;
ef0478f6
TBA
158};
159
160/* The singleton target ops object. */
161
162static x86_target the_x86_target;
163
aa5ca48f
DE
164/* Per-process arch-specific data we want to keep. */
165
166struct arch_process_info
167{
df7e5265 168 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
169};
170
d0722149
DE
171#ifdef __x86_64__
172
173/* Mapping between the general-purpose registers in `struct user'
174 format and GDB's register array layout.
175 Note that the transfer layout uses 64-bit regs. */
176static /*const*/ int i386_regmap[] =
177{
178 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
179 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
180 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
181 DS * 8, ES * 8, FS * 8, GS * 8
182};
183
184#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
185
186/* So code below doesn't have to care, i386 or amd64. */
187#define ORIG_EAX ORIG_RAX
bc9540e8 188#define REGSIZE 8
d0722149
DE
189
190static const int x86_64_regmap[] =
191{
192 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
193 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
194 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
195 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
196 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
197 DS * 8, ES * 8, FS * 8, GS * 8,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
201 -1,
202 -1, -1, -1, -1, -1, -1, -1, -1,
203 ORIG_RAX * 8,
2735833d
WT
204#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
205 21 * 8, 22 * 8,
206#else
207 -1, -1,
208#endif
a196ebeb 209 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
210 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
211 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
212 -1, -1, -1, -1, -1, -1, -1, -1,
213 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
214 -1, -1, -1, -1, -1, -1, -1, -1,
215 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
216 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
217 -1, -1, -1, -1, -1, -1, -1, -1,
218 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
219 -1, -1, -1, -1, -1, -1, -1, -1,
220 -1 /* pkru */
d0722149
DE
221};
222
223#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 224#define X86_64_USER_REGS (GS + 1)
d0722149
DE
225
226#else /* ! __x86_64__ */
227
228/* Mapping between the general-purpose registers in `struct user'
229 format and GDB's register array layout. */
230static /*const*/ int i386_regmap[] =
231{
232 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
233 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
234 EIP * 4, EFL * 4, CS * 4, SS * 4,
235 DS * 4, ES * 4, FS * 4, GS * 4
236};
237
238#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
239
bc9540e8
PA
240#define REGSIZE 4
241
d0722149 242#endif
3aee8918
PA
243
244#ifdef __x86_64__
245
246/* Returns true if the current inferior belongs to a x86-64 process,
247 per the tdesc. */
248
249static int
250is_64bit_tdesc (void)
251{
0bfdf32f 252 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
253
254 return register_size (regcache->tdesc, 0) == 8;
255}
256
257#endif
258
d0722149
DE
259\f
/* Called by libthread_db.  Store in *BASE the thread-local storage
   base address of thread LWPID: on a 64-bit inferior the FS/GS
   segment base (IDX selects which), on a 32-bit inferior the base
   field of GDT entry IDX.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* Note the PTRACE_ARCH_PRCTL argument order: the output buffer
	 goes in the "addr" slot and the ARCH_GET_* code in "data".  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit path: read the GDT entry; desc[1] is the segment base.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
300
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Returns 0
   on success, -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: the FS base itself is the per-thread cookie.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    /* 32-bit inferior: derive the GDT index from the GS selector,
       then read the corresponding GDT entry.  */
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the base-address word of the GDT entry.  */
    *addr = desc[1];
    return 0;
  }
}
347
348
d0722149 349\f
daca57a7
TBA
350bool
351x86_target::low_cannot_store_register (int regno)
d0722149 352{
3aee8918
PA
353#ifdef __x86_64__
354 if (is_64bit_tdesc ())
daca57a7 355 return false;
3aee8918
PA
356#endif
357
d0722149
DE
358 return regno >= I386_NUM_REGS;
359}
360
daca57a7
TBA
361bool
362x86_target::low_cannot_fetch_register (int regno)
d0722149 363{
3aee8918
PA
364#ifdef __x86_64__
365 if (is_64bit_tdesc ())
daca57a7 366 return false;
3aee8918
PA
367#endif
368
d0722149
DE
369 return regno >= I386_NUM_REGS;
370}
371
/* Fill the ptrace general-register buffer BUF from REGCACHE, using
   the 64-bit layout for 64-bit tdescs and the i386 layout otherwise.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      /* Without fs_base/gs_base in user_regs_struct, push the segment
	 bases to the kernel directly via arch_prctl.  */
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
426
/* Supply REGCACHE from the ptrace general-register buffer BUF; the
   inverse of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      /* Segment bases are not in the buffer; fetch them from the
	 kernel via arch_prctl, best-effort.  */
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
461
/* Fill/store helpers for the classic floating-point regsets.  On
   64-bit hosts the FP regset is already in fxsave layout; on i386 it
   is the older fsave layout, with fxsave available separately via the
   FPX regset below.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

/* PTRACE_GETFPXREGS/SETFPXREGS (fxsave layout) — i386 only.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
497
1570b33e
L
/* Fill/store helpers for the XSAVE extended-state regset
   (PTRACE_GETREGSET/SETREGSET with NT_X86_XSTATE).  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
509
d0722149
DE
510/* ??? The non-biarch i386 case stores all the i387 regs twice.
511 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
512 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
513 doesn't work. IWBN to avoid the duplication in the case where it
514 does work. Maybe the arch_setup routine could check whether it works
3aee8918 515 and update the supported regsets accordingly. */
d0722149 516
3aee8918 517static struct regset_info x86_regsets[] =
d0722149
DE
518{
519#ifdef HAVE_PTRACE_GETREGS
1570b33e 520 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
521 GENERAL_REGS,
522 x86_fill_gregset, x86_store_gregset },
1570b33e
L
523 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
524 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
525# ifndef __x86_64__
526# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 527 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
528 EXTENDED_REGS,
529 x86_fill_fpxregset, x86_store_fpxregset },
530# endif
531# endif
1570b33e 532 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
533 FP_REGS,
534 x86_fill_fpregset, x86_store_fpregset },
535#endif /* HAVE_PTRACE_GETREGS */
50bc912a 536 NULL_REGSET
d0722149
DE
537};
538
bf9ae9d8
TBA
bool
x86_target::low_supports_breakpoints ()
{
  /* Software breakpoints via low_get_pc/low_set_pc are always
     available on x86.  */
  return true;
}
544
545CORE_ADDR
546x86_target::low_get_pc (regcache *regcache)
d0722149 547{
3aee8918 548 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
549
550 if (use_64bit)
551 {
6598661d
PA
552 uint64_t pc;
553
442ea881 554 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
555 return (CORE_ADDR) pc;
556 }
557 else
558 {
6598661d
PA
559 uint32_t pc;
560
442ea881 561 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
562 return (CORE_ADDR) pc;
563 }
564}
565
bf9ae9d8
TBA
566void
567x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 568{
3aee8918 569 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
570
571 if (use_64bit)
572 {
6598661d
PA
573 uint64_t newpc = pc;
574
442ea881 575 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
576 }
577 else
578 {
6598661d
PA
579 uint32_t newpc = pc;
580
442ea881 581 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
582 }
583}
d4807ea2
TBA
584
int
x86_target::low_decr_pc_after_break ()
{
  /* After an int3 trap the PC points one byte past the breakpoint
     instruction; back it up by that one byte.  */
  return 1;
}
590
d0722149 591\f
dd373349 592static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
593#define x86_breakpoint_len 1
594
d7146cda
TBA
595bool
596x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
597{
598 unsigned char c;
599
d7146cda 600 read_memory (pc, &c, 1);
d0722149 601 if (c == 0xCC)
d7146cda 602 return true;
d0722149 603
d7146cda 604 return false;
d0722149
DE
605}
606\f
42995dbd 607/* Low-level function vector. */
df7e5265 608struct x86_dr_low_type x86_dr_low =
42995dbd 609 {
d33472ad
GB
610 x86_linux_dr_set_control,
611 x86_linux_dr_set_addr,
612 x86_linux_dr_get_addr,
613 x86_linux_dr_get_status,
614 x86_linux_dr_get_control,
42995dbd
GB
615 sizeof (void *),
616 };
aa5ca48f 617\f
90d74c30 618/* Breakpoint/Watchpoint support. */
aa5ca48f 619
007c9b97
TBA
620bool
621x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
622{
623 switch (z_type)
624 {
625 case Z_PACKET_SW_BP:
626 case Z_PACKET_HW_BP:
627 case Z_PACKET_WRITE_WP:
628 case Z_PACKET_ACCESS_WP:
007c9b97 629 return true;
802e8e6d 630 default:
007c9b97 631 return false;
802e8e6d
PA
632 }
633}
634
9db9aa23
TBA
635int
636x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
637 int size, raw_breakpoint *bp)
aa5ca48f
DE
638{
639 struct process_info *proc = current_process ();
802e8e6d 640
aa5ca48f
DE
641 switch (type)
642 {
802e8e6d
PA
643 case raw_bkpt_type_hw:
644 case raw_bkpt_type_write_wp:
645 case raw_bkpt_type_access_wp:
a4165e94 646 {
802e8e6d
PA
647 enum target_hw_bp_type hw_type
648 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 649 struct x86_debug_reg_state *state
fe978cb0 650 = &proc->priv->arch_private->debug_reg_state;
a4165e94 651
df7e5265 652 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
a4165e94 653 }
961bd387 654
aa5ca48f
DE
655 default:
656 /* Unsupported. */
657 return 1;
658 }
659}
660
9db9aa23
TBA
661int
662x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
663 int size, raw_breakpoint *bp)
aa5ca48f
DE
664{
665 struct process_info *proc = current_process ();
802e8e6d 666
aa5ca48f
DE
667 switch (type)
668 {
802e8e6d
PA
669 case raw_bkpt_type_hw:
670 case raw_bkpt_type_write_wp:
671 case raw_bkpt_type_access_wp:
a4165e94 672 {
802e8e6d
PA
673 enum target_hw_bp_type hw_type
674 = raw_bkpt_type_to_target_hw_bp_type (type);
df7e5265 675 struct x86_debug_reg_state *state
fe978cb0 676 = &proc->priv->arch_private->debug_reg_state;
a4165e94 677
df7e5265 678 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
a4165e94 679 }
aa5ca48f
DE
680 default:
681 /* Unsupported. */
682 return 1;
683 }
684}
685
ac1bbaca
TBA
686bool
687x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
688{
689 struct process_info *proc = current_process ();
fe978cb0 690 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
691}
692
ac1bbaca
TBA
693CORE_ADDR
694x86_target::low_stopped_data_address ()
aa5ca48f
DE
695{
696 struct process_info *proc = current_process ();
697 CORE_ADDR addr;
fe978cb0 698 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 699 &addr))
aa5ca48f
DE
700 return addr;
701 return 0;
702}
703\f
704/* Called when a new process is created. */
705
fd000fb3
TBA
/* Called when a new process is created.  Allocates the per-process
   debug-register mirror, zero-initialized and set to the "no debug
   registers in use" state.  Ownership passes to the caller (freed in
   low_delete_process).  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
715
04ec7890
SM
/* Called when a process is being deleted.  Frees the per-process
   arch data allocated by low_new_process.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
723
fd000fb3
TBA
/* Called when a new thread (lwp) is added; delegate per-thread
   debug-register setup to the shared nat/ implementation.  */

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}
3a8a0396 730
fd000fb3
TBA
/* Called when a thread is being deleted; delegate cleanup of the
   per-thread arch data to the shared nat/ implementation.  */

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
737
/* Target routine for new_fork.  Copies PARENT's debug-register
   mirror into CHILD.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
765
70a0bb6b
GB
766/* See nat/x86-dregs.h. */
767
768struct x86_debug_reg_state *
769x86_debug_reg_state (pid_t pid)
770{
771 struct process_info *proc = find_process_pid (pid);
772
773 return &proc->priv->arch_private->debug_reg_state;
774}
aa5ca48f 775\f
d0722149
DE
776/* When GDBSERVER is built as a 64-bit application on linux, the
777 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
778 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
779 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
780 conversion in-place ourselves. */
781
9cf12d57 782/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
783 layout of the inferiors' architecture. Returns true if any
784 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 785 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
786 INF. */
787
cb63de7c
TBA
bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    /* 64-bit gdbserver debugging an x32 inferior (64-bit tdesc but
       32-bit ELF): apply the x32 layout conversion.  */
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  /* Same layout on both sides; no conversion performed.  */
  return false;
}
808\f
1570b33e
L
809static int use_xml;
810
3aee8918
PA
811/* Format of XSAVE extended state is:
812 struct
813 {
814 fxsave_bytes[0..463]
815 sw_usable_bytes[464..511]
816 xstate_hdr_bytes[512..575]
817 avx_bytes[576..831]
818 future_state etc
819 };
820
821 Same memory layout will be used for the coredump NT_X86_XSTATE
822 representing the XSAVE extended state registers.
823
824 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
825 extended state mask, which is the same as the extended control register
826 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
827 together with the mask saved in the xstate_hdr_bytes to determine what
828 states the processor/OS supports and what state, used or initialized,
829 the process/thread is in. */
830#define I386_LINUX_XSAVE_XCR0_OFFSET 464
831
832/* Does the current host support the GETFPXREGS request? The header
833 file may or may not define it, and even if it is defined, the
834 kernel will return EIO if it's running on a pre-SSE processor. */
835int have_ptrace_getfpxregs =
836#ifdef HAVE_PTRACE_GETFPXREGS
837 -1
838#else
839 0
840#endif
841;
1570b33e 842
3aee8918
PA
/* Get Linux/x86 target description from running target.

   Probes the inferior (via ptrace) for its word size, XSAVE support
   and enabled XCR0 feature bits, caches the probe results in the
   file-static have_ptrace_getregset/xcr0 variables, and returns the
   matching target description.  Also sizes the XSTATE regset entry in
   x86_regsets once PTRACE_GETREGSET is known to work.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;		/* Cached across calls once probed.  */
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* One-time probe: does this (pre-SSE?) kernel/CPU support
     PTRACE_GETFPXREGS?  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* One-time probe: does PTRACE_GETREGSET/NT_X86_XSTATE work?  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to plain SSE if the feature-specific lookup failed.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
966
3aee8918
PA
967/* Update all the target description of all processes; a new GDB
968 connected, and it may or not support xml target descriptions. */
969
797bcff5
TBA
void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  low_arch_setup reads the
       tdesc through current_thread, so one must be selected.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  /* Restore the originally selected thread.  */
  current_thread = saved_thread;
}
991
992/* Process qSupported query, "xmlRegisters=". Update the buffer size for
993 PTRACE_GETREGSET. */
994
995static void
06e03fff 996x86_linux_process_qsupported (char **features, int count)
1570b33e 997{
06e03fff
PA
998 int i;
999
1570b33e
L
1000 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1001 with "i386" in qSupported query, it supports x86 XML target
1002 descriptions. */
1003 use_xml = 0;
06e03fff 1004 for (i = 0; i < count; i++)
1570b33e 1005 {
06e03fff 1006 const char *feature = features[i];
1570b33e 1007
06e03fff 1008 if (startswith (feature, "xmlRegisters="))
1570b33e 1009 {
06e03fff 1010 char *copy = xstrdup (feature + 13);
06e03fff 1011
ca3a04f6
CB
1012 char *saveptr;
1013 for (char *p = strtok_r (copy, ",", &saveptr);
1014 p != NULL;
1015 p = strtok_r (NULL, ",", &saveptr))
1570b33e 1016 {
06e03fff
PA
1017 if (strcmp (p, "i386") == 0)
1018 {
1019 use_xml = 1;
1020 break;
1021 }
1570b33e 1022 }
1570b33e 1023
06e03fff
PA
1024 free (copy);
1025 }
1570b33e 1026 }
797bcff5 1027 the_x86_target.update_xmltarget ();
1570b33e
L
1028}
1029
3aee8918 1030/* Common for x86/x86-64. */
d0722149 1031
3aee8918
PA
1032static struct regsets_info x86_regsets_info =
1033 {
1034 x86_regsets, /* regsets */
1035 0, /* num_regsets */
1036 NULL, /* disabled_regsets */
1037 };
214d508e
L
1038
#ifdef __x86_64__
/* Register-access description used for 64-bit inferiors.  No
   usrregs_info is provided: 64-bit registers go through regsets
   only.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
3aee8918
PA
/* PTRACE_PEEKUSER/POKEUSER register map for 32-bit inferiors, pairing
   the register count with the per-register user-area offsets in
   i386_regmap.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };
d0722149 1052
3aee8918
PA
/* Register-access description used for 32-bit inferiors: the shared
   regsets plus the PEEKUSER/POKEUSER map above.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1059
aa8d21c9
TBA
1060const regs_info *
1061x86_target::get_regs_info ()
3aee8918
PA
1062{
1063#ifdef __x86_64__
1064 if (is_64bit_tdesc ())
1065 return &amd64_linux_regs_info;
1066 else
1067#endif
1068 return &i386_linux_regs_info;
1069}
d0722149 1070
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  Stores the description read from the inferior (via
   x86_linux_read_description, defined earlier in this file) on the
   current process.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1079
82075af2
JS
/* Fill *SYSNO with the syscall number the inferior trapped on.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  /* Register 0 is 8 bytes wide exactly when the inferior uses the
     64-bit register layout.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      /* NOTE(review): assumes sizeof (long) == 8 in this branch,
	 which holds because a 64-bit tdesc implies an LP64
	 gdbserver -- confirm.  */
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1098
219f2f23
PA
/* Tracepoints are unconditionally supported on x86/x86-64; always
   returns nonzero.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1104
fa593d66
PA
/* Write the LEN bytes at BUF into the inferior at *TO and advance *TO
   past them.  Used to emit jump-pad and compiled-bytecode
   instructions incrementally.  NOTE(review): the status returned by
   target_write_memory is ignored here.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1111
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 83 ec 18", into raw bytes stored at BUF.  Decoding stops at the
   first token that is not a hex number (including the terminating
   NUL).  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *tail;
      unsigned long byte = strtoul (op, &tail, 16);

      if (tail == op)
	break;

      *dst++ = byte;
      op = tail;
    }

  return dst - buf;
}
1131
1132#ifdef __x86_64__
1133
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   TPOINT is the inferior-side tracepoint object, TPADDR the address
   the tracepoint is set at, COLLECTOR the collection function the pad
   calls, LOCKADDR the address of the collecting_t spin-lock, and
   ORIG_SIZE the size of the displaced original instruction.  On
   entry *JUMP_ENTRY is where the pad is built; on exit it is advanced
   past the pad.  The relocated original instruction ends up in
   [*ADJUSTED_INSN_ADDR, *ADJUSTED_INSN_ADDR_END).  TRAMPOLINE /
   TRAMPOLINE_SIZE are unused on amd64.  Returns 0 on success, or 1
   with an "E."-prefixed message in ERR.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The tracepoint address (the saved $pc) completes the register
     block on the stack.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  The object holds the
     tracepoint pointer and this thread's TLS base (%fs:0x0).  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loop on lock cmpxchg until *LOCKADDR is atomically
     replaced (from zero) with our collecting_t pointer.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  The first add skips the saved $pc slot
     (%rdi push above) rather than popping it.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  jump_insn is the
     5-byte e9 <rel32> template defined earlier in this file, so the
     displacement must fit in 32 bits.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1320
1321#endif /* __x86_64__ */
1322
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   i386 counterpart of amd64_install_fast_tracepoint_jump_pad; the
   parameter contract is the same.  Additionally, when the displaced
   instruction is only 4 bytes long (ORIG_SIZE == 4), a trampoline is
   allocated and returned via *TRAMPOLINE / *TRAMPOLINE_SIZE so the
   tracepoint site can use a shorter 16-bit-offset jump.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  /* NOTE(review): raw int store into a char buffer; fine on x86 but
     memcpy would be the portable idiom.  */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
						%esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* A near call is 5 bytes, the same size as the jump_insn template,
     so sizeof (jump_insn) doubles as the call length here.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the pushes above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1510
/* Install a fast tracepoint jump pad, dispatching on the bitness of
   the inferior's target description: the amd64 builder for a 64-bit
   tdesc (only possible when gdbserver itself is 64-bit), the i386
   builder otherwise.  See amd64_install_fast_tracepoint_jump_pad for
   the parameter contract.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1548
1549/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1550 architectures. */
1551
1552static int
1553x86_get_min_fast_tracepoint_insn_len (void)
1554{
1555 static int warned_about_fast_tracepoints = 0;
1556
1557#ifdef __x86_64__
1558 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1559 used for fast tracepoints. */
3aee8918 1560 if (is_64bit_tdesc ())
405f8e94
SS
1561 return 5;
1562#endif
1563
58b4daa5 1564 if (agent_loaded_p ())
405f8e94
SS
1565 {
1566 char errbuf[IPA_BUFSIZ];
1567
1568 errbuf[0] = '\0';
1569
1570 /* On x86, if trampolines are available, then 4-byte jump instructions
1571 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1572 with a 4-byte offset are used instead. */
1573 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1574 return 4;
1575 else
1576 {
1577 /* GDB has no channel to explain to user why a shorter fast
1578 tracepoint is not possible, but at least make GDBserver
1579 mention that something has gone awry. */
1580 if (!warned_about_fast_tracepoints)
1581 {
422186a9 1582 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1583 warned_about_fast_tracepoints = 1;
1584 }
1585 return 5;
1586 }
1587 }
1588 else
1589 {
1590 /* Indicate that the minimum length is currently unknown since the IPA
1591 has not loaded yet. */
1592 return 0;
1593 }
fa593d66
PA
1594}
1595
6a271cae
PA
/* Append the LEN bytes of compiled bytecode at START to the inferior
   at current_insn_ptr, then advance current_insn_ptr past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1608
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the machine code of the inline-asm block INSNS -- bracketed by
   the start_NAME/end_NAME labels -- into the inferior at
   current_insn_ptr (via add_insns).  The leading "jmp end_NAME" makes
   sure the emitting function itself never executes the block at run
   time.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Like EMIT_ASM, but assemble INSNS as 32-bit code (.code32), for
   bytecode emitted into 32-bit inferiors from a 64-bit gdbserver.  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* In a 32-bit build everything already is 32-bit code.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1646
1647#ifdef __x86_64__
1648
/* The amd64_emit_* functions below emit machine code implementing the
   agent-expression bytecode ops.  Convention (visible from the pop /
   stack_flush emitters): the expression's top-of-stack lives in %rax,
   deeper entries on the machine stack.  */

/* Emit the compiled-expression prologue: build a frame and spill the
   two incoming argument registers (%rdi, %rsi) into it.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


/* Emit the epilogue: store the result (%rax) through the saved value
   pointer, return 0, and tear the frame down.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the second operand and add it into the top (%rax).  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: top = next - top.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply: not implemented; record an emit error so this bytecode
   cannot be compiled.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift: not implemented.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic (signed) right shift: not implemented.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical (unsigned) right shift: not implemented.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1711
/* Emit: sign-extend %rax from its low ARG bits (8, 16 or 32) to the
   full 64 bits; any other width is an emit error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit logical NOT: %rax = (%rax == 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit bitwise AND of the top two entries, popping one.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR of the top two entries, popping one.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR of the top two entries, popping one.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise complement of %rax.  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
1776
/* Emit: %rax = (next == top), popping the second operand.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (next < top), signed compare, popping the second
   operand.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (next < top), unsigned compare, popping the second
   operand.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: dereference %rax as a SIZE-byte (1/2/4/8) load into the low
   bits of %rax.  Unsupported sizes emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1842
/* Emit: pop the top of stack and branch when it is non-zero.  The
   jne's 32-bit displacement is emitted zeroed; *OFFSET_P/*SIZE_P
   report its position and size within the emitted code so it can be
   patched later (see amd64_write_goto_address).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jump with a zeroed 32-bit displacement, to be
   patched later; *OFFSET_P/*SIZE_P report where it lives.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte displacement at FROM so the jump emitted there
   lands at TO.  Only 4-byte displacements are supported.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
1883
/* Emit: load the 64-bit constant NUM into %rax (movabs).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using a 5-byte relative call when FN is within
   +/-2GB of the emitted code, and an indirect call through %r10
   otherwise.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1940
/* Emit: fetch raw register REG by calling the get_raw_reg helper with
   the register number in %esi (the raw-regs pointer is still in
   %rdi from the prologue).  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: discard the current top by popping the next entry into
   %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push the top (%rax) onto the machine stack, making room for a
   new top.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit: zero-extend %rax from its low ARG bits (8, 16 or 32).  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap the top two stack entries.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit: drop N 8-byte entries from the machine stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2022
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call of FN with the
   constant ARG1 in %edi; the result is left in %rax (the new top).  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call of FN with
   the constant ARG1 in %edi and the current top (%rax) as the second
   argument; %rax is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2068
/* The six amd64_emit_*_goto emitters below fuse a comparison of the
   top two stack entries with a conditional branch: both operands are
   popped either way, and the e9 jump's 32-bit displacement is left
   zeroed for later patching.  In each, *OFFSET_P = 13 is the byte
   position of that displacement and *SIZE_P = 4 its width.  */

/* Branch when next == top.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch when next != top.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch when next < top (signed).  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch when next <= top (signed).  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch when next > top (signed).  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch when next >= top (signed).  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2189
/* Emitter vtable used when compiling agent-expression bytecode to
   native code for 64-bit inferiors.  The initializers are positional,
   so their order must match the emit_ops declaration exactly.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2230
2231#endif /* __x86_64__ */
2232
/* Emit the i386 function prologue for compiled agent expressions:
   set up a frame pointer and save %ebx (callee-saved, used as the
   high half of the 64-bit top-of-stack value).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2243
/* Emit the i386 epilogue: store the 64-bit result (%eax low, %ebx
   high) through the value pointer at 12(%ebp), zero %eax as the
   return status, restore %ebx and %ebp, and return.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2256
/* Emit 64-bit addition: add the pair on the memory stack into the
   %eax/%ebx top-of-stack pair (add low halves, adc high halves),
   then drop the memory operand.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2265
/* Emit 64-bit subtraction: subtract the %eax/%ebx pair from the
   value on the memory stack (sub low, sbb high), then pop that
   result into %eax/%ebx as the new top of stack.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2275
/* 64-bit multiply is not implemented for i386; signal failure by
   setting emit_error so the caller can fall back.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2281
/* 64-bit left shift is not implemented for i386; flag emit_error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2287
/* 64-bit arithmetic right shift is not implemented for i386; flag
   emit_error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2293
/* 64-bit logical right shift is not implemented for i386; flag
   emit_error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2299
/* Emit sign extension of the top of stack from ARG bits to 64 bits.
   The low 32 bits are sign-extended in %eax (cbtw/cwtl as needed)
   and %ebx is filled with copies of the sign bit via sarl $31.
   Unsupported widths set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2327
/* Emit logical NOT of the 64-bit top of stack: result is 1 if both
   halves are zero, else 0, left in %eax with %ebx cleared.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2338
/* Emit 64-bit bitwise AND of the top two stack values, popping the
   memory operand.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2347
/* Emit 64-bit bitwise OR of the top two stack values, popping the
   memory operand.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2356
/* Emit 64-bit bitwise XOR of the top two stack values, popping the
   memory operand.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2365
/* Emit 64-bit bitwise NOT of the %eax/%ebx top-of-stack pair.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2373
/* Emit 64-bit equality test: compare high halves then low halves of
   the top two stack values; leave 1 in %eax if equal, else 0, with
   %ebx cleared, and drop the memory operand.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2391
/* Emit 64-bit signed less-than: compare high halves signed (jl);
   only if they are equal compare low halves.  Leave 1/0 in %eax,
   clear %ebx, drop the memory operand.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2410
/* Emit 64-bit unsigned less-than: same shape as the signed variant
   but using jb for the unsigned comparison.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2429
/* Emit a memory load of SIZE bytes through the address in %eax,
   replacing the top of stack with the loaded value (high word into
   %ebx only for the 8-byte case).  Note: sizes other than 1/2/4/8
   fall through and emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2454
/* Emit "if top-of-stack is non-zero, goto": or the two halves
   together, pop the next value into %eax/%ebx, and emit a raw
   jne rel32 (0x0f 0x85) whose displacement is patched later.
   OFFSET_P/SIZE_P receive the displacement's position and size.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2472
/* Emit an unconditional goto as a raw jmp rel32 (0xe9) with a zeroed
   displacement to be patched later; the displacement starts at byte
   offset 1 and is 4 bytes wide.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2484
2485static void
2486i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2487{
2488 int diff = (to - (from + size));
2489 unsigned char buf[sizeof (int)];
2490
2491 /* We're only doing 4-byte sizes at the moment. */
2492 if (size != 4)
2493 {
2494 emit_error = 1;
2495 return;
2496 }
2497
2498 memcpy (buf, &diff, sizeof (int));
4196ab2a 2499 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2500}
2501
2502static void
4e29fb54 2503i386_emit_const (LONGEST num)
6a271cae
PA
2504{
2505 unsigned char buf[16];
b00ad6ff 2506 int i, hi, lo;
6a271cae
PA
2507 CORE_ADDR buildaddr = current_insn_ptr;
2508
2509 i = 0;
2510 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2511 lo = num & 0xffffffff;
2512 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2513 i += 4;
2514 hi = ((num >> 32) & 0xffffffff);
2515 if (hi)
2516 {
2517 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2518 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2519 i += 4;
2520 }
2521 else
2522 {
2523 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2524 }
2525 append_insns (&buildaddr, i, buf);
2526 current_insn_ptr = buildaddr;
2527}
2528
2529static void
2530i386_emit_call (CORE_ADDR fn)
2531{
2532 unsigned char buf[16];
2533 int i, offset;
2534 CORE_ADDR buildaddr;
2535
2536 buildaddr = current_insn_ptr;
2537 i = 0;
2538 buf[i++] = 0xe8; /* call <reladdr> */
2539 offset = ((int) fn) - (buildaddr + 5);
2540 memcpy (buf + 1, &offset, 4);
2541 append_insns (&buildaddr, 5, buf);
2542 current_insn_ptr = buildaddr;
2543}
2544
/* Emit code pushing raw register REG's value: reserve stack space,
   materialize the register number in %eax (immediate patched into
   the mov below), pass the regs base from 8(%ebp) plus the register
   number to the get_raw_reg callback, and clear %ebx (high word).
   The statement order interleaves asm and hand-assembled bytes and
   must not be rearranged.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2570
/* Emit a pop: reload the %eax/%ebx top-of-stack pair from the memory
   stack.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2578
/* Emit a stack flush: push the cached %eax/%ebx top-of-stack pair
   onto the memory stack (high word first).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2586
/* Emit zero extension of the top of stack from ARG bits: mask %eax
   down to the requested width and clear the %ebx high word.
   Unsupported widths set emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2610
/* Emit a swap of the top two 64-bit stack entries: stash %eax/%ebx
   in %ecx/%edx, pop the second entry into %eax/%ebx, then push the
   stashed pair back.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2622
2623static void
2624i386_emit_stack_adjust (int n)
2625{
2626 unsigned char buf[16];
2627 int i;
2628 CORE_ADDR buildaddr = current_insn_ptr;
2629
2630 i = 0;
2631 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2632 buf[i++] = 0x64;
2633 buf[i++] = 0x24;
2634 buf[i++] = n * 8;
2635 append_insns (&buildaddr, i, buf);
2636 current_insn_ptr = buildaddr;
2637}
2638
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Emit a call to FN passing the immediate ARG1 on the stack; the
   64-bit result comes back in %eax:%edx and is moved into the
   %eax/%ebx top-of-stack pair.  The immediate is patched into the
   hand-assembled movl below.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2666
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Emit a call to FN passing the immediate ARG1 and the current
   64-bit top of stack (%eax/%ebx) as arguments; the top of stack is
   preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
2702
6b9801d4 2703
/* Emit a 64-bit equality conditional goto: compare low halves first
   (more likely to differ), then high halves; pop both operands on
   either path.  OFFSET_P/SIZE_P receive the location/size of the raw
   jmp's patchable rel32 displacement.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2728
/* Emit a 64-bit inequality conditional goto; mirror image of
   i386_emit_eq_goto.  OFFSET_P/SIZE_P receive the location/size of
   the patchable rel32 jmp displacement.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2754
/* Emit a 64-bit signed less-than conditional goto: compare high
   halves first; if equal, decide on the low halves.  OFFSET_P/SIZE_P
   receive the location/size of the patchable rel32 jmp displacement.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2780
/* Emit a 64-bit signed less-or-equal conditional goto; same shape as
   i386_emit_lt_goto with jle/jnle.  OFFSET_P/SIZE_P receive the
   location/size of the patchable rel32 jmp displacement.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2806
/* Emit a 64-bit signed greater-than conditional goto; same shape as
   i386_emit_lt_goto with jg/jng.  OFFSET_P/SIZE_P receive the
   location/size of the patchable rel32 jmp displacement.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2832
/* Emit a 64-bit signed greater-or-equal conditional goto; same shape
   as i386_emit_lt_goto with jge/jnge.  OFFSET_P/SIZE_P receive the
   location/size of the patchable rel32 jmp displacement.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  /* Offset of the jmp's rel32 operand; must match the bytes above.  */
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2858
/* Emitter vtable used when compiling agent-expression bytecode to
   native code for 32-bit inferiors.  The initializers are positional,
   so their order must match the emit_ops declaration exactly.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2899
2900
2901static struct emit_ops *
2902x86_emit_ops (void)
2903{
2904#ifdef __x86_64__
3aee8918 2905 if (is_64bit_tdesc ())
6a271cae
PA
2906 return &amd64_emit_ops;
2907 else
2908#endif
2909 return &i386_emit_ops;
2910}
2911
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  /* KIND is ignored: x86 has a single software breakpoint encoding.
     Report its length through *SIZE and return the opcode bytes.  */
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2920
/* Implementation of linux_target_ops method "supports_range_stepping".
   Range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2926
/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  Hardware single-step is always
   available on x86.  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2935
/* Return the in-process-agent target description index for the
   current thread's register cache.  On __x86_64__ builds the amd64
   lookup returns unconditionally, leaving the i386 code below
   unreachable; it is only live in 32-bit builds.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  /* Second argument 0: presumably "do not fetch registers" — the
     tdesc alone is needed here; TODO confirm against
     get_thread_regcache.  */
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  /* The no-xml fallback tdesc is not known to i386_get_ipa_tdesc_idx,
     so map it explicitly.  */
  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2951
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   The initializers are positional, so their order must match the
   linux_target_ops declaration exactly.  */

struct linux_target_ops the_low_target =
{
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2969
/* The linux target ops object; points the generic gdbserver linux
   code at the x86-specific target instance.  */

linux_process_target *the_linux_target = &the_x86_target;
2973
/* One-time architecture setup: build the fallback ("no XML") Linux
   target descriptions used when the peer cannot consume XML target
   descriptions, and initialize the regset bookkeeping.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  /* Clone the SSE amd64 description and attach the canned non-XML
     target string.  */
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  /* Same for the i386 fallback description.  */
  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.036161 seconds and 4 git commands to generate.