/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
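
/* On amd64, these ARCH_* codes are passed as the "data" argument of a
   PTRACE_ARCH_PRCTL request to read or write a tracee's FS/GS segment
   bases, e.g. ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS),
   as done in ps_get_thread_area below.  */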

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support XML target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};
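
/* Each entry above maps a GDB register number (the array index) to
   the byte offset of that register within the `struct user' register
   block; offsets are scaled by 8 because the transfer layout uses
   64-bit slots even for this i386 view.  */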

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                              /* pkru  */
};
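
/* A -1 entry above marks a register with no slot in `struct user';
   such registers cannot be transferred individually via the user
   area and are handled through the regset (PTRACE_GETREGSET) path
   instead, so the gregset routines below simply skip them.  */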

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  /* Register 0 is 8 bytes wide in the amd64 descriptions, and 4 bytes
     wide in the i386 ones.  */
  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* The bottom three bits of a segment
                                      selector hold the RPL and table
                                      indicator; shifting them away
                                      yields the descriptor index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        collect_register_by_name (regcache, "fs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

        collect_register_by_name (regcache, "gs_base", &base);
        ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
                   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
        unsigned long base;
        int lwpid = lwpid_of (current_thread);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
          supply_register_by_name (regcache, "fs_base", &base);

        if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
          supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
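
/* 0xCC is the single-byte 'int3' software interrupt instruction, so a
   breakpoint can be planted at any address without worrying about
   instruction alignment or length.  */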

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  the_target->read_memory (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d let the child inherit
     the hardware debug registers from the parent on fork/vfork/clone.
     Newer Linux kernels create such tasks with zeroed debug
     registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors end up zeroed
     before the forked off process is detached, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

   The same memory layout is used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of sw_usable_bytes (bytes 464..471) hold the OS
   enabled extended state mask, which is the same as the extended control
   register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this
   mask together with the mask saved in the xstate_hdr_bytes to determine
   what states the processor/OS supports and what state, used or
   initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
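
/* For instance, reading XCR0 out of an xsave buffer viewed as an
   array of uint64_t means indexing element
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t), i.e. 464 / 8 =
   element 58, which is how x86_linux_read_description fetches it
   below.  */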

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        {
          tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                                !is_elf64);
        }

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);

          char *saveptr;
          for (char *p = strtok_r (copy, ",", &saveptr);
               p != NULL;
               p = strtok_r (NULL, ",", &saveptr))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }
  the_x86_target.update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

/* Parse OP, a string of space-separated hex byte values, and append
   the bytes it encodes to BUF.  Returns the number of bytes
   written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
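
/* For example, push_opcode (&buf[i], "48 89 e1") appends the three
   bytes 0x48 0x89 0xe1 (the encoding of "mov %rsp,%rcx") to buf and
   returns 3.  */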

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* Spin-lock: atomically swap a pointer to our collecting_t object
     (the current %rsp) into the zeroed lock word at LOCKADDR, looping
     until the cmpxchg succeeds.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)                                           \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ ("jmp end_" #NAME "\n"                                    \
               "\t" "start_" #NAME ":"                                  \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":");                                  \
    } while (0)
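
/* For instance, EMIT_ASM (my_pop, "pop %rax") (an illustrative name)
   assembles the "pop %rax" bytes into the gdbserver binary itself,
   bracketed by the labels start_my_pop and end_my_pop, and then
   copies those bytes into the buffer being built at current_insn_ptr;
   the leading "jmp end_my_pop" ensures gdbserver never executes the
   block in place.  */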

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)                                          \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ (".code32\n"                                              \
               "\t" "jmp end_" #NAME "\n"                               \
               "\t" "start_" #NAME ":\n"                                \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":\n"                                  \
               ".code64\n");                                            \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}
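
/* The ".byte 0x0f, 0x85, ..." above hand-assembles a 32-bit "jne"
   whose displacement field is left zeroed; amd64_write_goto_address
   patches it once the destination is known.  *offset_p = 10 is the
   byte position of that field: 3 bytes of mov, 1 of pop, 4 of cmp,
   then the 2-byte jne opcode.  */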
1799
1800static void
1801amd64_emit_goto (int *offset_p, int *size_p)
1802{
1803 EMIT_ASM (amd64_goto,
1804 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1805 if (offset_p)
1806 *offset_p = 1;
1807 if (size_p)
1808 *size_p = 4;
1809}
1810
1811static void
1812amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1813{
1814 int diff = (to - (from + size));
1815 unsigned char buf[sizeof (int)];
1816
1817 if (size != 4)
1818 {
1819 emit_error = 1;
1820 return;
1821 }
1822
1823 memcpy (buf, &diff, sizeof (int));
4196ab2a 1824 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
1825}
1826
1827static void
4e29fb54 1828amd64_emit_const (LONGEST num)
6a271cae
PA
1829{
1830 unsigned char buf[16];
1831 int i;
1832 CORE_ADDR buildaddr = current_insn_ptr;
1833
1834 i = 0;
1835 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
b00ad6ff 1836 memcpy (&buf[i], &num, sizeof (num));
6a271cae
PA
1837 i += 8;
1838 append_insns (&buildaddr, i, buf);
1839 current_insn_ptr = buildaddr;
1840}
1841
1842static void
1843amd64_emit_call (CORE_ADDR fn)
1844{
1845 unsigned char buf[16];
1846 int i;
1847 CORE_ADDR buildaddr;
4e29fb54 1848 LONGEST offset64;
6a271cae
PA
1849
1850 /* The destination function being in the shared library, may be
1851 >31-bits away off the compiled code pad. */
1852
1853 buildaddr = current_insn_ptr;
1854
1855 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1856
1857 i = 0;
1858
1859 if (offset64 > INT_MAX || offset64 < INT_MIN)
1860 {
1861 /* Offset is too large for a call. Use callq, but that requires
1862 a register, so avoid it if possible. Use r10, since it is
1863 call-clobbered, we don't have to push/pop it. */
1864 buf[i++] = 0x48; /* mov $fn,%r10 */
1865 buf[i++] = 0xba;
1866 memcpy (buf + i, &fn, 8);
1867 i += 8;
1868 buf[i++] = 0xff; /* callq *%r10 */
1869 buf[i++] = 0xd2;
1870 }
1871 else
1872 {
1873 int offset32 = offset64; /* we know we can't overflow here. */
ed036b40
PA
1874
1875 buf[i++] = 0xe8; /* call <reladdr> */
6a271cae
PA
1876 memcpy (buf + i, &offset32, 4);
1877 i += 4;
1878 }
1879
1880 append_insns (&buildaddr, i, buf);
1881 current_insn_ptr = buildaddr;
1882}
1883
1884static void
1885amd64_emit_reg (int reg)
1886{
1887 unsigned char buf[16];
1888 int i;
1889 CORE_ADDR buildaddr;
1890
1891 /* Assume raw_regs is still in %rdi. */
1892 buildaddr = current_insn_ptr;
1893 i = 0;
1894 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1895 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1896 i += 4;
1897 append_insns (&buildaddr, i, buf);
1898 current_insn_ptr = buildaddr;
1899 amd64_emit_call (get_raw_reg_func_addr ());
1900}
1901
1902static void
1903amd64_emit_pop (void)
1904{
1905 EMIT_ASM (amd64_pop,
1906 "pop %rax");
1907}
1908
1909static void
1910amd64_emit_stack_flush (void)
1911{
1912 EMIT_ASM (amd64_stack_flush,
1913 "push %rax");
1914}
1915
1916static void
1917amd64_emit_zero_ext (int arg)
1918{
1919 switch (arg)
1920 {
1921 case 8:
1922 EMIT_ASM (amd64_zero_ext_8,
1923 "and $0xff,%rax");
1924 break;
1925 case 16:
1926 EMIT_ASM (amd64_zero_ext_16,
1927 "and $0xffff,%rax");
1928 break;
1929 case 32:
1930 EMIT_ASM (amd64_zero_ext_32,
1931 "mov $0xffffffff,%rcx\n\t"
1932 "and %rcx,%rax");
1933 break;
1934 default:
1935 emit_error = 1;
1936 }
1937}
1938
1939static void
1940amd64_emit_swap (void)
1941{
1942 EMIT_ASM (amd64_swap,
1943 "mov %rax,%rcx\n\t"
1944 "pop %rax\n\t"
1945 "push %rcx");
1946}
1947
1948static void
1949amd64_emit_stack_adjust (int n)
1950{
1951 unsigned char buf[16];
1952 int i;
1953 CORE_ADDR buildaddr = current_insn_ptr;
1954
1955 i = 0;
1956 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1957 buf[i++] = 0x8d;
1958 buf[i++] = 0x64;
1959 buf[i++] = 0x24;
 1960	  /* The 8-bit displacement only handles adjustments of up to 15
	     slots, but we don't expect any more than that.  */
1961 buf[i++] = n * 8;
1962 append_insns (&buildaddr, i, buf);
1963 current_insn_ptr = buildaddr;
1964}
1965
1966/* FN's prototype is `LONGEST(*fn)(int)'. */
1967
1968static void
1969amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1970{
1971 unsigned char buf[16];
1972 int i;
1973 CORE_ADDR buildaddr;
1974
1975 buildaddr = current_insn_ptr;
1976 i = 0;
1977 buf[i++] = 0xbf; /* movl $<n>,%edi */
 1978	  memcpy (&buf[i], &arg1, sizeof (arg1));
1979 i += 4;
1980 append_insns (&buildaddr, i, buf);
1981 current_insn_ptr = buildaddr;
1982 amd64_emit_call (fn);
1983}
1984
 1985	/* FN's prototype is `void(*fn)(int,LONGEST)'. */
1986
1987static void
1988amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1989{
1990 unsigned char buf[16];
1991 int i;
1992 CORE_ADDR buildaddr;
1993
1994 buildaddr = current_insn_ptr;
1995 i = 0;
1996 buf[i++] = 0xbf; /* movl $<n>,%edi */
 1997	  memcpy (&buf[i], &arg1, sizeof (arg1));
1998 i += 4;
1999 append_insns (&buildaddr, i, buf);
2000 current_insn_ptr = buildaddr;
2001 EMIT_ASM (amd64_void_call_2_a,
2002 /* Save away a copy of the stack top. */
2003 "push %rax\n\t"
2004 /* Also pass top as the second argument. */
2005 "mov %rax,%rsi");
2006 amd64_emit_call (fn);
2007 EMIT_ASM (amd64_void_call_2_b,
2008 /* Restore the stack top, %rax may have been trashed. */
2009 "pop %rax");
2010}
2011
 2012	static void
2013amd64_emit_eq_goto (int *offset_p, int *size_p)
2014{
2015 EMIT_ASM (amd64_eq,
2016 "cmp %rax,(%rsp)\n\t"
2017 "jne .Lamd64_eq_fallthru\n\t"
2018 "lea 0x8(%rsp),%rsp\n\t"
2019 "pop %rax\n\t"
2020 /* jmp, but don't trust the assembler to choose the right jump */
2021 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2022 ".Lamd64_eq_fallthru:\n\t"
2023 "lea 0x8(%rsp),%rsp\n\t"
2024 "pop %rax");
2025
2026 if (offset_p)
2027 *offset_p = 13;
2028 if (size_p)
2029 *size_p = 4;
2030}
2031
 2032	static void
2033amd64_emit_ne_goto (int *offset_p, int *size_p)
2034{
2035 EMIT_ASM (amd64_ne,
2036 "cmp %rax,(%rsp)\n\t"
2037 "je .Lamd64_ne_fallthru\n\t"
2038 "lea 0x8(%rsp),%rsp\n\t"
2039 "pop %rax\n\t"
2040 /* jmp, but don't trust the assembler to choose the right jump */
2041 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2042 ".Lamd64_ne_fallthru:\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2044 "pop %rax");
2045
2046 if (offset_p)
2047 *offset_p = 13;
2048 if (size_p)
2049 *size_p = 4;
2050}
2051
 2052	static void
2053amd64_emit_lt_goto (int *offset_p, int *size_p)
2054{
2055 EMIT_ASM (amd64_lt,
2056 "cmp %rax,(%rsp)\n\t"
2057 "jnl .Lamd64_lt_fallthru\n\t"
2058 "lea 0x8(%rsp),%rsp\n\t"
2059 "pop %rax\n\t"
2060 /* jmp, but don't trust the assembler to choose the right jump */
2061 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2062 ".Lamd64_lt_fallthru:\n\t"
2063 "lea 0x8(%rsp),%rsp\n\t"
2064 "pop %rax");
2065
2066 if (offset_p)
2067 *offset_p = 13;
2068 if (size_p)
2069 *size_p = 4;
2070}
2071
 2072	static void
2073amd64_emit_le_goto (int *offset_p, int *size_p)
2074{
2075 EMIT_ASM (amd64_le,
2076 "cmp %rax,(%rsp)\n\t"
2077 "jnle .Lamd64_le_fallthru\n\t"
2078 "lea 0x8(%rsp),%rsp\n\t"
2079 "pop %rax\n\t"
2080 /* jmp, but don't trust the assembler to choose the right jump */
2081 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2082 ".Lamd64_le_fallthru:\n\t"
2083 "lea 0x8(%rsp),%rsp\n\t"
2084 "pop %rax");
2085
2086 if (offset_p)
2087 *offset_p = 13;
2088 if (size_p)
2089 *size_p = 4;
2090}
2091
 2092	static void
2093amd64_emit_gt_goto (int *offset_p, int *size_p)
2094{
2095 EMIT_ASM (amd64_gt,
2096 "cmp %rax,(%rsp)\n\t"
2097 "jng .Lamd64_gt_fallthru\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2099 "pop %rax\n\t"
2100 /* jmp, but don't trust the assembler to choose the right jump */
2101 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2102 ".Lamd64_gt_fallthru:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2104 "pop %rax");
2105
2106 if (offset_p)
2107 *offset_p = 13;
2108 if (size_p)
2109 *size_p = 4;
2110}
2111
 2112	static void
2113amd64_emit_ge_goto (int *offset_p, int *size_p)
2114{
2115 EMIT_ASM (amd64_ge,
2116 "cmp %rax,(%rsp)\n\t"
2117 "jnge .Lamd64_ge_fallthru\n\t"
2118 ".Lamd64_ge_jump:\n\t"
2119 "lea 0x8(%rsp),%rsp\n\t"
2120 "pop %rax\n\t"
2121 /* jmp, but don't trust the assembler to choose the right jump */
2122 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2123 ".Lamd64_ge_fallthru:\n\t"
2124 "lea 0x8(%rsp),%rsp\n\t"
2125 "pop %rax");
2126
2127 if (offset_p)
2128 *offset_p = 13;
2129 if (size_p)
2130 *size_p = 4;
2131}
2132
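/* The "emit_ops" structure for AMD64 bytecode compilation.  */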
2133struct emit_ops amd64_emit_ops =
2134 {
2135 amd64_emit_prologue,
2136 amd64_emit_epilogue,
2137 amd64_emit_add,
2138 amd64_emit_sub,
2139 amd64_emit_mul,
2140 amd64_emit_lsh,
2141 amd64_emit_rsh_signed,
2142 amd64_emit_rsh_unsigned,
2143 amd64_emit_ext,
2144 amd64_emit_log_not,
2145 amd64_emit_bit_and,
2146 amd64_emit_bit_or,
2147 amd64_emit_bit_xor,
2148 amd64_emit_bit_not,
2149 amd64_emit_equal,
2150 amd64_emit_less_signed,
2151 amd64_emit_less_unsigned,
2152 amd64_emit_ref,
2153 amd64_emit_if_goto,
2154 amd64_emit_goto,
2155 amd64_write_goto_address,
2156 amd64_emit_const,
2157 amd64_emit_call,
2158 amd64_emit_reg,
2159 amd64_emit_pop,
2160 amd64_emit_stack_flush,
2161 amd64_emit_zero_ext,
2162 amd64_emit_swap,
2163 amd64_emit_stack_adjust,
2164 amd64_emit_int_call_1,
2165 amd64_emit_void_call_2,
2166 amd64_emit_eq_goto,
2167 amd64_emit_ne_goto,
2168 amd64_emit_lt_goto,
2169 amd64_emit_le_goto,
2170 amd64_emit_gt_goto,
2171 amd64_emit_ge_goto
2172 };
2173
2174#endif /* __x86_64__ */
2175
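/* The i386 emitters below track 64-bit values in the %eax/%ebx
   pair: %eax holds the low word and %ebx the high word of the top
   of the stack.  */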
2176static void
2177i386_emit_prologue (void)
2178{
2179 EMIT_ASM32 (i386_prologue,
2180 "push %ebp\n\t"
2181 "mov %esp,%ebp\n\t"
2182 "push %ebx");
2183 /* At this point, the raw regs base address is at 8(%ebp), and the
2184 value pointer is at 12(%ebp). */
2185}
2186
2187static void
2188i386_emit_epilogue (void)
2189{
2190 EMIT_ASM32 (i386_epilogue,
2191 "mov 12(%ebp),%ecx\n\t"
2192 "mov %eax,(%ecx)\n\t"
2193 "mov %ebx,0x4(%ecx)\n\t"
2194 "xor %eax,%eax\n\t"
 2195	       "pop %ebx\n\t"
2196 "pop %ebp\n\t"
2197 "ret");
2198}
2199
2200static void
2201i386_emit_add (void)
2202{
2203 EMIT_ASM32 (i386_add,
2204 "add (%esp),%eax\n\t"
2205 "adc 0x4(%esp),%ebx\n\t"
2206 "lea 0x8(%esp),%esp");
2207}
2208
2209static void
2210i386_emit_sub (void)
2211{
2212 EMIT_ASM32 (i386_sub,
2213 "subl %eax,(%esp)\n\t"
2214 "sbbl %ebx,4(%esp)\n\t"
2215 "pop %eax\n\t"
2216 "pop %ebx\n\t");
2217}
2218
2219static void
2220i386_emit_mul (void)
2221{
2222 emit_error = 1;
2223}
2224
2225static void
2226i386_emit_lsh (void)
2227{
2228 emit_error = 1;
2229}
2230
2231static void
2232i386_emit_rsh_signed (void)
2233{
2234 emit_error = 1;
2235}
2236
2237static void
2238i386_emit_rsh_unsigned (void)
2239{
2240 emit_error = 1;
2241}
2242
2243static void
2244i386_emit_ext (int arg)
2245{
2246 switch (arg)
2247 {
2248 case 8:
2249 EMIT_ASM32 (i386_ext_8,
2250 "cbtw\n\t"
2251 "cwtl\n\t"
2252 "movl %eax,%ebx\n\t"
2253 "sarl $31,%ebx");
2254 break;
2255 case 16:
2256 EMIT_ASM32 (i386_ext_16,
2257 "cwtl\n\t"
2258 "movl %eax,%ebx\n\t"
2259 "sarl $31,%ebx");
2260 break;
2261 case 32:
2262 EMIT_ASM32 (i386_ext_32,
2263 "movl %eax,%ebx\n\t"
2264 "sarl $31,%ebx");
2265 break;
2266 default:
2267 emit_error = 1;
2268 }
2269}
2270
2271static void
2272i386_emit_log_not (void)
2273{
2274 EMIT_ASM32 (i386_log_not,
2275 "or %ebx,%eax\n\t"
2276 "test %eax,%eax\n\t"
2277 "sete %cl\n\t"
2278 "xor %ebx,%ebx\n\t"
2279 "movzbl %cl,%eax");
2280}
2281
2282static void
2283i386_emit_bit_and (void)
2284{
2285 EMIT_ASM32 (i386_and,
2286 "and (%esp),%eax\n\t"
2287 "and 0x4(%esp),%ebx\n\t"
2288 "lea 0x8(%esp),%esp");
2289}
2290
2291static void
2292i386_emit_bit_or (void)
2293{
2294 EMIT_ASM32 (i386_or,
2295 "or (%esp),%eax\n\t"
2296 "or 0x4(%esp),%ebx\n\t"
2297 "lea 0x8(%esp),%esp");
2298}
2299
2300static void
2301i386_emit_bit_xor (void)
2302{
2303 EMIT_ASM32 (i386_xor,
2304 "xor (%esp),%eax\n\t"
2305 "xor 0x4(%esp),%ebx\n\t"
2306 "lea 0x8(%esp),%esp");
2307}
2308
2309static void
2310i386_emit_bit_not (void)
2311{
2312 EMIT_ASM32 (i386_bit_not,
2313 "xor $0xffffffff,%eax\n\t"
2314 "xor $0xffffffff,%ebx\n\t");
2315}
2316
2317static void
2318i386_emit_equal (void)
2319{
2320 EMIT_ASM32 (i386_equal,
2321 "cmpl %ebx,4(%esp)\n\t"
2322 "jne .Li386_equal_false\n\t"
2323 "cmpl %eax,(%esp)\n\t"
2324 "je .Li386_equal_true\n\t"
2325 ".Li386_equal_false:\n\t"
2326 "xor %eax,%eax\n\t"
2327 "jmp .Li386_equal_end\n\t"
2328 ".Li386_equal_true:\n\t"
2329 "mov $1,%eax\n\t"
2330 ".Li386_equal_end:\n\t"
2331 "xor %ebx,%ebx\n\t"
2332 "lea 0x8(%esp),%esp");
2333}
2334
2335static void
2336i386_emit_less_signed (void)
2337{
2338 EMIT_ASM32 (i386_less_signed,
2339 "cmpl %ebx,4(%esp)\n\t"
2340 "jl .Li386_less_signed_true\n\t"
2341 "jne .Li386_less_signed_false\n\t"
2342 "cmpl %eax,(%esp)\n\t"
2343 "jl .Li386_less_signed_true\n\t"
2344 ".Li386_less_signed_false:\n\t"
2345 "xor %eax,%eax\n\t"
2346 "jmp .Li386_less_signed_end\n\t"
2347 ".Li386_less_signed_true:\n\t"
2348 "mov $1,%eax\n\t"
2349 ".Li386_less_signed_end:\n\t"
2350 "xor %ebx,%ebx\n\t"
2351 "lea 0x8(%esp),%esp");
2352}
2353
2354static void
2355i386_emit_less_unsigned (void)
2356{
2357 EMIT_ASM32 (i386_less_unsigned,
2358 "cmpl %ebx,4(%esp)\n\t"
2359 "jb .Li386_less_unsigned_true\n\t"
2360 "jne .Li386_less_unsigned_false\n\t"
2361 "cmpl %eax,(%esp)\n\t"
2362 "jb .Li386_less_unsigned_true\n\t"
2363 ".Li386_less_unsigned_false:\n\t"
2364 "xor %eax,%eax\n\t"
2365 "jmp .Li386_less_unsigned_end\n\t"
2366 ".Li386_less_unsigned_true:\n\t"
2367 "mov $1,%eax\n\t"
2368 ".Li386_less_unsigned_end:\n\t"
2369 "xor %ebx,%ebx\n\t"
2370 "lea 0x8(%esp),%esp");
2371}
2372
2373static void
2374i386_emit_ref (int size)
2375{
2376 switch (size)
2377 {
2378 case 1:
2379 EMIT_ASM32 (i386_ref1,
2380 "movb (%eax),%al");
2381 break;
2382 case 2:
2383 EMIT_ASM32 (i386_ref2,
2384 "movw (%eax),%ax");
2385 break;
2386 case 4:
2387 EMIT_ASM32 (i386_ref4,
2388 "movl (%eax),%eax");
2389 break;
2390 case 8:
2391 EMIT_ASM32 (i386_ref8,
2392 "movl 4(%eax),%ebx\n\t"
2393 "movl (%eax),%eax");
2394 break;
2395 }
2396}
2397
2398static void
2399i386_emit_if_goto (int *offset_p, int *size_p)
2400{
2401 EMIT_ASM32 (i386_if_goto,
2402 "mov %eax,%ecx\n\t"
2403 "or %ebx,%ecx\n\t"
2404 "pop %eax\n\t"
2405 "pop %ebx\n\t"
2406 "cmpl $0,%ecx\n\t"
2407 /* Don't trust the assembler to choose the right jump */
2408 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2409
2410 if (offset_p)
2411 *offset_p = 11; /* be sure that this matches the sequence above */
2412 if (size_p)
2413 *size_p = 4;
2414}
2415
2416static void
2417i386_emit_goto (int *offset_p, int *size_p)
2418{
2419 EMIT_ASM32 (i386_goto,
2420 /* Don't trust the assembler to choose the right jump */
2421 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2422 if (offset_p)
2423 *offset_p = 1;
2424 if (size_p)
2425 *size_p = 4;
2426}
2427
2428static void
2429i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2430{
2431 int diff = (to - (from + size));
2432 unsigned char buf[sizeof (int)];
2433
2434 /* We're only doing 4-byte sizes at the moment. */
2435 if (size != 4)
2436 {
2437 emit_error = 1;
2438 return;
2439 }
2440
2441 memcpy (buf, &diff, sizeof (int));
 2442	  target_write_memory (from, buf, sizeof (int));
2443}
2444
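/* Emit code to load the 64-bit constant NUM into the %eax/%ebx
   pair, skipping the high-word load when it is zero.  */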
2445static void
 2446	i386_emit_const (LONGEST num)
2447{
2448 unsigned char buf[16];
 2449	  int i, hi, lo;
2450 CORE_ADDR buildaddr = current_insn_ptr;
2451
2452 i = 0;
2453 buf[i++] = 0xb8; /* mov $<n>,%eax */
2454 lo = num & 0xffffffff;
2455 memcpy (&buf[i], &lo, sizeof (lo));
2456 i += 4;
2457 hi = ((num >> 32) & 0xffffffff);
2458 if (hi)
2459 {
2460 buf[i++] = 0xbb; /* mov $<n>,%ebx */
 2461	      memcpy (&buf[i], &hi, sizeof (hi));
2462 i += 4;
2463 }
2464 else
2465 {
2466 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2467 }
2468 append_insns (&buildaddr, i, buf);
2469 current_insn_ptr = buildaddr;
2470}
2471
2472static void
2473i386_emit_call (CORE_ADDR fn)
2474{
2475 unsigned char buf[16];
2476 int i, offset;
2477 CORE_ADDR buildaddr;
2478
2479 buildaddr = current_insn_ptr;
2480 i = 0;
2481 buf[i++] = 0xe8; /* call <reladdr> */
2482 offset = ((int) fn) - (buildaddr + 5);
2483 memcpy (buf + 1, &offset, 4);
2484 append_insns (&buildaddr, 5, buf);
2485 current_insn_ptr = buildaddr;
2486}
2487
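/* Emit code to fetch raw register REG.  Unlike the amd64 variant,
   the register number and the raw_regs pointer are passed to the
   get_raw_reg helper on the stack.  */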
2488static void
2489i386_emit_reg (int reg)
2490{
2491 unsigned char buf[16];
2492 int i;
2493 CORE_ADDR buildaddr;
2494
2495 EMIT_ASM32 (i386_reg_a,
2496 "sub $0x8,%esp");
2497 buildaddr = current_insn_ptr;
2498 i = 0;
2499 buf[i++] = 0xb8; /* mov $<n>,%eax */
 2500	  memcpy (&buf[i], &reg, sizeof (reg));
2501 i += 4;
2502 append_insns (&buildaddr, i, buf);
2503 current_insn_ptr = buildaddr;
2504 EMIT_ASM32 (i386_reg_b,
2505 "mov %eax,4(%esp)\n\t"
2506 "mov 8(%ebp),%eax\n\t"
2507 "mov %eax,(%esp)");
2508 i386_emit_call (get_raw_reg_func_addr ());
2509 EMIT_ASM32 (i386_reg_c,
2510 "xor %ebx,%ebx\n\t"
2511 "lea 0x8(%esp),%esp");
2512}
2513
2514static void
2515i386_emit_pop (void)
2516{
2517 EMIT_ASM32 (i386_pop,
2518 "pop %eax\n\t"
2519 "pop %ebx");
2520}
2521
2522static void
2523i386_emit_stack_flush (void)
2524{
2525 EMIT_ASM32 (i386_stack_flush,
2526 "push %ebx\n\t"
2527 "push %eax");
2528}
2529
2530static void
2531i386_emit_zero_ext (int arg)
2532{
2533 switch (arg)
2534 {
2535 case 8:
2536 EMIT_ASM32 (i386_zero_ext_8,
2537 "and $0xff,%eax\n\t"
2538 "xor %ebx,%ebx");
2539 break;
2540 case 16:
2541 EMIT_ASM32 (i386_zero_ext_16,
2542 "and $0xffff,%eax\n\t"
2543 "xor %ebx,%ebx");
2544 break;
2545 case 32:
2546 EMIT_ASM32 (i386_zero_ext_32,
2547 "xor %ebx,%ebx");
2548 break;
2549 default:
2550 emit_error = 1;
2551 }
2552}
2553
2554static void
2555i386_emit_swap (void)
2556{
2557 EMIT_ASM32 (i386_swap,
2558 "mov %eax,%ecx\n\t"
2559 "mov %ebx,%edx\n\t"
2560 "pop %eax\n\t"
2561 "pop %ebx\n\t"
2562 "push %edx\n\t"
2563 "push %ecx");
2564}
2565
2566static void
2567i386_emit_stack_adjust (int n)
2568{
2569 unsigned char buf[16];
2570 int i;
2571 CORE_ADDR buildaddr = current_insn_ptr;
2572
2573 i = 0;
2574 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2575 buf[i++] = 0x64;
2576 buf[i++] = 0x24;
2577 buf[i++] = n * 8;
2578 append_insns (&buildaddr, i, buf);
2579 current_insn_ptr = buildaddr;
2580}
2581
2582/* FN's prototype is `LONGEST(*fn)(int)'. */
2583
2584static void
2585i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2586{
2587 unsigned char buf[16];
2588 int i;
2589 CORE_ADDR buildaddr;
2590
2591 EMIT_ASM32 (i386_int_call_1_a,
2592 /* Reserve a bit of stack space. */
2593 "sub $0x8,%esp");
2594 /* Put the one argument on the stack. */
2595 buildaddr = current_insn_ptr;
2596 i = 0;
2597 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2598 buf[i++] = 0x04;
2599 buf[i++] = 0x24;
 2600	  memcpy (&buf[i], &arg1, sizeof (arg1));
2601 i += 4;
2602 append_insns (&buildaddr, i, buf);
2603 current_insn_ptr = buildaddr;
2604 i386_emit_call (fn);
2605 EMIT_ASM32 (i386_int_call_1_c,
2606 "mov %edx,%ebx\n\t"
2607 "lea 0x8(%esp),%esp");
2608}
2609
 2610	/* FN's prototype is `void(*fn)(int,LONGEST)'. */
2611
2612static void
2613i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2614{
2615 unsigned char buf[16];
2616 int i;
2617 CORE_ADDR buildaddr;
2618
2619 EMIT_ASM32 (i386_void_call_2_a,
2620 /* Preserve %eax only; we don't have to worry about %ebx. */
2621 "push %eax\n\t"
2622 /* Reserve a bit of stack space for arguments. */
2623 "sub $0x10,%esp\n\t"
 2624	      /* Copy "top" to the second argument position.  (Note that
 2625	         we can't assume the function won't scribble on its
 2626	         arguments, so don't try to restore from this.)  */
2627 "mov %eax,4(%esp)\n\t"
2628 "mov %ebx,8(%esp)");
2629 /* Put the first argument on the stack. */
2630 buildaddr = current_insn_ptr;
2631 i = 0;
2632 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2633 buf[i++] = 0x04;
2634 buf[i++] = 0x24;
b00ad6ff 2635 memcpy (&buf[i], &arg1, sizeof (arg1));
6a271cae
PA
2636 i += 4;
2637 append_insns (&buildaddr, i, buf);
2638 current_insn_ptr = buildaddr;
2639 i386_emit_call (fn);
2640 EMIT_ASM32 (i386_void_call_2_b,
2641 "lea 0x10(%esp),%esp\n\t"
2642 /* Restore original stack top. */
2643 "pop %eax");
2644}
2645
 2646	
 2647	static void
2648i386_emit_eq_goto (int *offset_p, int *size_p)
2649{
2650 EMIT_ASM32 (eq,
2651 /* Check low half first, more likely to be decider */
2652 "cmpl %eax,(%esp)\n\t"
2653 "jne .Leq_fallthru\n\t"
2654 "cmpl %ebx,4(%esp)\n\t"
2655 "jne .Leq_fallthru\n\t"
2656 "lea 0x8(%esp),%esp\n\t"
2657 "pop %eax\n\t"
2658 "pop %ebx\n\t"
2659 /* jmp, but don't trust the assembler to choose the right jump */
2660 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2661 ".Leq_fallthru:\n\t"
2662 "lea 0x8(%esp),%esp\n\t"
2663 "pop %eax\n\t"
2664 "pop %ebx");
2665
2666 if (offset_p)
2667 *offset_p = 18;
2668 if (size_p)
2669 *size_p = 4;
2670}
2671
 2672	static void
2673i386_emit_ne_goto (int *offset_p, int *size_p)
2674{
2675 EMIT_ASM32 (ne,
2676 /* Check low half first, more likely to be decider */
2677 "cmpl %eax,(%esp)\n\t"
2678 "jne .Lne_jump\n\t"
2679 "cmpl %ebx,4(%esp)\n\t"
2680 "je .Lne_fallthru\n\t"
2681 ".Lne_jump:\n\t"
2682 "lea 0x8(%esp),%esp\n\t"
2683 "pop %eax\n\t"
2684 "pop %ebx\n\t"
2685 /* jmp, but don't trust the assembler to choose the right jump */
2686 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2687 ".Lne_fallthru:\n\t"
2688 "lea 0x8(%esp),%esp\n\t"
2689 "pop %eax\n\t"
2690 "pop %ebx");
2691
2692 if (offset_p)
2693 *offset_p = 18;
2694 if (size_p)
2695 *size_p = 4;
2696}
2697
 2698	static void
2699i386_emit_lt_goto (int *offset_p, int *size_p)
2700{
2701 EMIT_ASM32 (lt,
2702 "cmpl %ebx,4(%esp)\n\t"
2703 "jl .Llt_jump\n\t"
2704 "jne .Llt_fallthru\n\t"
2705 "cmpl %eax,(%esp)\n\t"
2706 "jnl .Llt_fallthru\n\t"
2707 ".Llt_jump:\n\t"
2708 "lea 0x8(%esp),%esp\n\t"
2709 "pop %eax\n\t"
2710 "pop %ebx\n\t"
2711 /* jmp, but don't trust the assembler to choose the right jump */
2712 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2713 ".Llt_fallthru:\n\t"
2714 "lea 0x8(%esp),%esp\n\t"
2715 "pop %eax\n\t"
2716 "pop %ebx");
2717
2718 if (offset_p)
2719 *offset_p = 20;
2720 if (size_p)
2721 *size_p = 4;
2722}
2723
 2724	static void
2725i386_emit_le_goto (int *offset_p, int *size_p)
2726{
2727 EMIT_ASM32 (le,
2728 "cmpl %ebx,4(%esp)\n\t"
2729 "jle .Lle_jump\n\t"
2730 "jne .Lle_fallthru\n\t"
2731 "cmpl %eax,(%esp)\n\t"
2732 "jnle .Lle_fallthru\n\t"
2733 ".Lle_jump:\n\t"
2734 "lea 0x8(%esp),%esp\n\t"
2735 "pop %eax\n\t"
2736 "pop %ebx\n\t"
2737 /* jmp, but don't trust the assembler to choose the right jump */
2738 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2739 ".Lle_fallthru:\n\t"
2740 "lea 0x8(%esp),%esp\n\t"
2741 "pop %eax\n\t"
2742 "pop %ebx");
2743
2744 if (offset_p)
2745 *offset_p = 20;
2746 if (size_p)
2747 *size_p = 4;
2748}
2749
 2750	static void
2751i386_emit_gt_goto (int *offset_p, int *size_p)
2752{
2753 EMIT_ASM32 (gt,
2754 "cmpl %ebx,4(%esp)\n\t"
2755 "jg .Lgt_jump\n\t"
2756 "jne .Lgt_fallthru\n\t"
2757 "cmpl %eax,(%esp)\n\t"
2758 "jng .Lgt_fallthru\n\t"
2759 ".Lgt_jump:\n\t"
2760 "lea 0x8(%esp),%esp\n\t"
2761 "pop %eax\n\t"
2762 "pop %ebx\n\t"
2763 /* jmp, but don't trust the assembler to choose the right jump */
2764 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2765 ".Lgt_fallthru:\n\t"
2766 "lea 0x8(%esp),%esp\n\t"
2767 "pop %eax\n\t"
2768 "pop %ebx");
2769
2770 if (offset_p)
2771 *offset_p = 20;
2772 if (size_p)
2773 *size_p = 4;
2774}
2775
 2776	static void
2777i386_emit_ge_goto (int *offset_p, int *size_p)
2778{
2779 EMIT_ASM32 (ge,
2780 "cmpl %ebx,4(%esp)\n\t"
2781 "jge .Lge_jump\n\t"
2782 "jne .Lge_fallthru\n\t"
2783 "cmpl %eax,(%esp)\n\t"
2784 "jnge .Lge_fallthru\n\t"
2785 ".Lge_jump:\n\t"
2786 "lea 0x8(%esp),%esp\n\t"
2787 "pop %eax\n\t"
2788 "pop %ebx\n\t"
2789 /* jmp, but don't trust the assembler to choose the right jump */
2790 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2791 ".Lge_fallthru:\n\t"
2792 "lea 0x8(%esp),%esp\n\t"
2793 "pop %eax\n\t"
2794 "pop %ebx");
2795
2796 if (offset_p)
2797 *offset_p = 20;
2798 if (size_p)
2799 *size_p = 4;
2800}
2801
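/* The "emit_ops" structure for i386 bytecode compilation.  */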
2802struct emit_ops i386_emit_ops =
2803 {
2804 i386_emit_prologue,
2805 i386_emit_epilogue,
2806 i386_emit_add,
2807 i386_emit_sub,
2808 i386_emit_mul,
2809 i386_emit_lsh,
2810 i386_emit_rsh_signed,
2811 i386_emit_rsh_unsigned,
2812 i386_emit_ext,
2813 i386_emit_log_not,
2814 i386_emit_bit_and,
2815 i386_emit_bit_or,
2816 i386_emit_bit_xor,
2817 i386_emit_bit_not,
2818 i386_emit_equal,
2819 i386_emit_less_signed,
2820 i386_emit_less_unsigned,
2821 i386_emit_ref,
2822 i386_emit_if_goto,
2823 i386_emit_goto,
2824 i386_write_goto_address,
2825 i386_emit_const,
2826 i386_emit_call,
2827 i386_emit_reg,
2828 i386_emit_pop,
2829 i386_emit_stack_flush,
2830 i386_emit_zero_ext,
2831 i386_emit_swap,
2832 i386_emit_stack_adjust,
2833 i386_emit_int_call_1,
2834 i386_emit_void_call_2,
2835 i386_emit_eq_goto,
2836 i386_emit_ne_goto,
2837 i386_emit_lt_goto,
2838 i386_emit_le_goto,
2839 i386_emit_gt_goto,
2840 i386_emit_ge_goto
2841 };
2842
2843
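/* Return the emit_ops structure matching the inferior: the amd64
   ops for a 64-bit tdesc, the i386 ops otherwise.  */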
2844static struct emit_ops *
2845x86_emit_ops (void)
2846{
2847#ifdef __x86_64__
 2848	  if (is_64bit_tdesc ())
2849 return &amd64_emit_ops;
2850 else
2851#endif
2852 return &i386_emit_ops;
2853}
2854
2855/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2856
2857static const gdb_byte *
2858x86_sw_breakpoint_from_kind (int kind, int *size)
2859{
2860 *size = x86_breakpoint_len;
2861 return x86_breakpoint;
2862}
2863
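/* Implementation of linux_target_ops method "supports_range_stepping".  */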
2864static int
2865x86_supports_range_stepping (void)
2866{
2867 return 1;
2868}
2869
2870/* Implementation of linux_target_ops method "supports_hardware_single_step".
2871 */
2872
2873static int
2874x86_supports_hardware_single_step (void)
2875{
2876 return 1;
2877}
2878
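/* Implementation of linux_target_ops method "get_ipa_tdesc_idx".  */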
2879static int
2880x86_get_ipa_tdesc_idx (void)
2881{
2882 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2883 const struct target_desc *tdesc = regcache->tdesc;
2884
2885#ifdef __x86_64__
 2886	  return amd64_get_ipa_tdesc_idx (tdesc);
2887#endif
2888
 2889	  if (tdesc == tdesc_i386_linux_no_xml)
 2890	    return X86_TDESC_SSE;
 2891	
 2892	  return i386_get_ipa_tdesc_idx (tdesc);
2893}
2894
2895/* This is initialized assuming an amd64 target.
2896 x86_arch_setup will correct it for i386 or amd64 targets. */
2897
2898struct linux_target_ops the_low_target =
2899{
2900 NULL, /* breakpoint_kind_from_pc */
2901 x86_sw_breakpoint_from_kind,
2902 NULL,
2903 1,
2904 x86_breakpoint_at,
 2905	  x86_supports_z_point_type,
2906 x86_insert_point,
2907 x86_remove_point,
2908 x86_stopped_by_watchpoint,
2909 x86_stopped_data_address,
2910 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2911 native i386 case (no registers smaller than an xfer unit), and are not
2912 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2913 NULL,
2914 NULL,
2915 /* need to fix up i386 siginfo if host is amd64 */
2916 x86_siginfo_fixup,
 2917	  x86_linux_new_process,
 2918	  x86_linux_delete_process,
 2919	  x86_linux_new_thread,
 2920	  x86_linux_delete_thread,
 2921	  x86_linux_new_fork,
 2922	  x86_linux_prepare_to_resume,
 2923	  x86_linux_process_qsupported,
2924 x86_supports_tracepoints,
2925 x86_get_thread_area,
 2926	  x86_install_fast_tracepoint_jump_pad,
2927 x86_emit_ops,
2928 x86_get_min_fast_tracepoint_insn_len,
 2929	  x86_supports_range_stepping,
2930 NULL, /* breakpoint_kind_from_current_state */
2931 x86_supports_hardware_single_step,
 2932	  x86_get_syscall_trapinfo,
 2933	  x86_get_ipa_tdesc_idx,
 2934	};
 2935	
2936/* The linux target ops object. */
2937
2938linux_process_target *the_linux_target = &the_x86_target;
2939
2940void
2941initialize_low_arch (void)
2942{
2943 /* Initialize the Linux target descriptions. */
2944#ifdef __x86_64__
 2945	  tdesc_amd64_linux_no_xml = allocate_target_description ();
2946 copy_target_description (tdesc_amd64_linux_no_xml,
2947 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2948 false));
2949 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2950#endif
 2951	
 2952	  tdesc_i386_linux_no_xml = allocate_target_description ();
2953 copy_target_description (tdesc_i386_linux_no_xml,
2954 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2955 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2956
2957 initialize_regsets_info (&x86_regsets_info);
2958}