gdbserver/linux-low: turn 'breakpoint_at' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
50#ifdef __x86_64__
51static struct target_desc *tdesc_amd64_linux_no_xml;
52#endif
53static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
59/* Backward compatibility for gdb without XML support. */
60
61static const char *xmltarget_i386_linux_no_xml = "@<target>\
62<architecture>i386</architecture>\
63<osabi>GNU/Linux</osabi>\
64</target>";
f6d1620c
L
65
66#ifdef __x86_64__
1570b33e
L
67static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68<architecture>i386:x86-64</architecture>\
69<osabi>GNU/Linux</osabi>\
70</target>";
f6d1620c 71#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
ef0478f6
TBA
95/* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99class x86_target : public linux_process_target
100{
101public:
102
797bcff5
TBA
103 /* Update all the target description of all processes; a new GDB
104 connected, and it may or not support xml target descriptions. */
105 void update_xmltarget ();
106
aa8d21c9
TBA
107 const regs_info *get_regs_info () override;
108
3ca4edb6
TBA
109 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
110
797bcff5
TBA
111protected:
112
113 void low_arch_setup () override;
daca57a7
TBA
114
115 bool low_cannot_fetch_register (int regno) override;
116
117 bool low_cannot_store_register (int regno) override;
bf9ae9d8
TBA
118
119 bool low_supports_breakpoints () override;
120
121 CORE_ADDR low_get_pc (regcache *regcache) override;
122
123 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
d4807ea2
TBA
124
125 int low_decr_pc_after_break () override;
d7146cda
TBA
126
127 bool low_breakpoint_at (CORE_ADDR pc) override;
ef0478f6
TBA
128};
129
130/* The singleton target ops object. */
131
132static x86_target the_x86_target;
133
aa5ca48f
DE
134/* Per-process arch-specific data we want to keep. */
135
136struct arch_process_info
137{
df7e5265 138 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
139};
140
d0722149
DE
141#ifdef __x86_64__
142
143/* Mapping between the general-purpose registers in `struct user'
144 format and GDB's register array layout.
145 Note that the transfer layout uses 64-bit regs. */
146static /*const*/ int i386_regmap[] =
147{
148 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
149 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
150 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
151 DS * 8, ES * 8, FS * 8, GS * 8
152};
153
154#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
155
156/* So code below doesn't have to care, i386 or amd64. */
157#define ORIG_EAX ORIG_RAX
bc9540e8 158#define REGSIZE 8
d0722149
DE
159
160static const int x86_64_regmap[] =
161{
162 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
163 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
164 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
165 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
166 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
167 DS * 8, ES * 8, FS * 8, GS * 8,
168 -1, -1, -1, -1, -1, -1, -1, -1,
169 -1, -1, -1, -1, -1, -1, -1, -1,
170 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
171 -1,
172 -1, -1, -1, -1, -1, -1, -1, -1,
173 ORIG_RAX * 8,
2735833d
WT
174#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
175 21 * 8, 22 * 8,
176#else
177 -1, -1,
178#endif
a196ebeb 179 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
180 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
181 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
182 -1, -1, -1, -1, -1, -1, -1, -1,
183 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
186 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
187 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1 /* pkru */
d0722149
DE
191};
192
193#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 194#define X86_64_USER_REGS (GS + 1)
d0722149
DE
195
196#else /* ! __x86_64__ */
197
198/* Mapping between the general-purpose registers in `struct user'
199 format and GDB's register array layout. */
200static /*const*/ int i386_regmap[] =
201{
202 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
203 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
204 EIP * 4, EFL * 4, CS * 4, SS * 4,
205 DS * 4, ES * 4, FS * 4, GS * 4
206};
207
208#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
209
bc9540e8
PA
210#define REGSIZE 4
211
d0722149 212#endif
3aee8918
PA
213
214#ifdef __x86_64__
215
216/* Returns true if the current inferior belongs to a x86-64 process,
217 per the tdesc. */
218
219static int
220is_64bit_tdesc (void)
221{
0bfdf32f 222 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
223
224 return register_size (regcache->tdesc, 0) == 8;
225}
226
227#endif
228
d0722149
DE
229\f
/* Called by libthread_db.  Return the TLS base address for thread
   LWPID via *BASE.  IDX selects the segment (FS/GS on 64-bit, a GDT
   entry index on 32-bit).  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferiors: the segment bases are fetched directly
	 with the ARCH_PRCTL ptrace request.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    /* 32-bit inferiors: read the GDT descriptor; desc[1] is the
       segment's base address field.  */
    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
270
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.
   Returns 0 on success and stores the address in *ADDR; returns -1 on
   failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* On 64-bit, the thread area is the FS segment base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* On 32-bit, derive the GDT entry index from the GS selector
       (selector >> 3) and read that thread-area descriptor.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the base-address field of the descriptor.  */
    *addr = desc[1];
    return 0;
  }
}
317
318
d0722149 319\f
daca57a7
TBA
320bool
321x86_target::low_cannot_store_register (int regno)
d0722149 322{
3aee8918
PA
323#ifdef __x86_64__
324 if (is_64bit_tdesc ())
daca57a7 325 return false;
3aee8918
PA
326#endif
327
d0722149
DE
328 return regno >= I386_NUM_REGS;
329}
330
daca57a7
TBA
331bool
332x86_target::low_cannot_fetch_register (int regno)
d0722149 333{
3aee8918
PA
334#ifdef __x86_64__
335 if (is_64bit_tdesc ())
daca57a7 336 return false;
3aee8918
PA
337#endif
338
d0722149
DE
339 return regno >= I386_NUM_REGS;
340}
341
/* Collect the general-purpose registers from REGCACHE into BUF, in
   the native `struct user' layout expected by PTRACE_SETREGS.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      /* 64-bit inferior: copy every mapped register.  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not part of the regs struct on this
	   host; write them via ARCH_PRCTL instead.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
396
/* Supply the general-purpose registers in BUF (native `struct user'
   layout, as returned by PTRACE_GETREGS) to REGCACHE.  Inverse of
   x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not in the regs struct on this host;
	   read them via ARCH_PRCTL.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
431
/* Collect the FP registers from REGCACHE into BUF, in the layout of
   the FXSAVE area (64-bit) or the classic FSAVE area (32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
441
/* Supply the FP registers in BUF (FXSAVE layout on 64-bit, FSAVE
   layout on 32-bit) to REGCACHE.  Inverse of x86_fill_fpregset.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
451
452#ifndef __x86_64__
453
/* Collect the FP/SSE registers from REGCACHE into BUF in FXSAVE
   layout, for PTRACE_SETFPXREGS (32-bit hosts only).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
459
/* Supply the FP/SSE registers in BUF (FXSAVE layout, from
   PTRACE_GETFPXREGS) to REGCACHE (32-bit hosts only).  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
465
466#endif
467
1570b33e
L
/* Collect the extended state registers from REGCACHE into BUF in
   XSAVE layout, for PTRACE_SETREGSET with NT_X86_XSTATE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
473
/* Supply the extended state registers in BUF (XSAVE layout, from
   PTRACE_GETREGSET with NT_X86_XSTATE) to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
479
d0722149
DE
480/* ??? The non-biarch i386 case stores all the i387 regs twice.
481 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
482 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
483 doesn't work. IWBN to avoid the duplication in the case where it
484 does work. Maybe the arch_setup routine could check whether it works
3aee8918 485 and update the supported regsets accordingly. */
d0722149 486
3aee8918 487static struct regset_info x86_regsets[] =
d0722149
DE
488{
489#ifdef HAVE_PTRACE_GETREGS
1570b33e 490 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
491 GENERAL_REGS,
492 x86_fill_gregset, x86_store_gregset },
1570b33e
L
493 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
494 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
495# ifndef __x86_64__
496# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 497 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
498 EXTENDED_REGS,
499 x86_fill_fpxregset, x86_store_fpxregset },
500# endif
501# endif
1570b33e 502 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
503 FP_REGS,
504 x86_fill_fpregset, x86_store_fpregset },
505#endif /* HAVE_PTRACE_GETREGS */
50bc912a 506 NULL_REGSET
d0722149
DE
507};
508
bf9ae9d8
TBA
bool
x86_target::low_supports_breakpoints ()
{
  /* This target provides low_get_pc/low_set_pc/low_breakpoint_at.  */
  return true;
}
514
515CORE_ADDR
516x86_target::low_get_pc (regcache *regcache)
d0722149 517{
3aee8918 518 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
519
520 if (use_64bit)
521 {
6598661d
PA
522 uint64_t pc;
523
442ea881 524 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
525 return (CORE_ADDR) pc;
526 }
527 else
528 {
6598661d
PA
529 uint32_t pc;
530
442ea881 531 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
532 return (CORE_ADDR) pc;
533 }
534}
535
bf9ae9d8
TBA
536void
537x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 538{
3aee8918 539 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
540
541 if (use_64bit)
542 {
6598661d
PA
543 uint64_t newpc = pc;
544
442ea881 545 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
546 }
547 else
548 {
6598661d
PA
549 uint32_t newpc = pc;
550
442ea881 551 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
552 }
553}
d4807ea2
TBA
554
int
x86_target::low_decr_pc_after_break ()
{
  /* The x86 software breakpoint (int3) is one byte long; the PC must
     be backed up by that amount after the trap.  */
  return 1;
}
560
d0722149 561\f
dd373349 562static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
563#define x86_breakpoint_len 1
564
d7146cda
TBA
565bool
566x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
567{
568 unsigned char c;
569
d7146cda 570 read_memory (pc, &c, 1);
d0722149 571 if (c == 0xCC)
d7146cda 572 return true;
d0722149 573
d7146cda 574 return false;
d0722149
DE
575}
576\f
42995dbd 577/* Low-level function vector. */
df7e5265 578struct x86_dr_low_type x86_dr_low =
42995dbd 579 {
d33472ad
GB
580 x86_linux_dr_set_control,
581 x86_linux_dr_set_addr,
582 x86_linux_dr_get_addr,
583 x86_linux_dr_get_status,
584 x86_linux_dr_get_control,
42995dbd
GB
585 sizeof (void *),
586 };
aa5ca48f 587\f
90d74c30 588/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
589
590static int
802e8e6d
PA
591x86_supports_z_point_type (char z_type)
592{
593 switch (z_type)
594 {
595 case Z_PACKET_SW_BP:
596 case Z_PACKET_HW_BP:
597 case Z_PACKET_WRITE_WP:
598 case Z_PACKET_ACCESS_WP:
599 return 1;
600 default:
601 return 0;
602 }
603}
604
/* Insert a hardware breakpoint or watchpoint of type TYPE at ADDR
   covering SIZE bytes, using the current process's debug-register
   mirror.  Returns x86_dr_insert_watchpoint's result, or 1 for
   unsupported TYPEs.  */

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
630
/* Remove a hardware breakpoint or watchpoint of type TYPE at ADDR
   covering SIZE bytes from the current process's debug-register
   mirror.  Returns x86_dr_remove_watchpoint's result, or 1 for
   unsupported TYPEs.  Inverse of x86_insert_point.  */

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
655
656static int
657x86_stopped_by_watchpoint (void)
658{
659 struct process_info *proc = current_process ();
fe978cb0 660 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
661}
662
663static CORE_ADDR
664x86_stopped_data_address (void)
665{
666 struct process_info *proc = current_process ();
667 CORE_ADDR addr;
fe978cb0 668 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 669 &addr))
aa5ca48f
DE
670 return addr;
671 return 0;
672}
673\f
674/* Called when a new process is created. */
675
676static struct arch_process_info *
677x86_linux_new_process (void)
678{
ed859da7 679 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 680
df7e5265 681 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
682
683 return info;
684}
685
04ec7890
SM
/* Called when a process is being deleted.  Release the arch-specific
   state allocated by x86_linux_new_process.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
693
3a8a0396
DB
/* Target routine for linux_new_fork.  Copy the parent's
   debug-register mirror into CHILD.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
721
70a0bb6b
GB
722/* See nat/x86-dregs.h. */
723
724struct x86_debug_reg_state *
725x86_debug_reg_state (pid_t pid)
726{
727 struct process_info *proc = find_process_pid (pid);
728
729 return &proc->priv->arch_private->debug_reg_state;
730}
aa5ca48f 731\f
d0722149
DE
732/* When GDBSERVER is built as a 64-bit application on linux, the
733 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
734 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
735 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
736 conversion in-place ourselves. */
737
9cf12d57 738/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
739 layout of the inferiors' architecture. Returns true if any
740 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 741 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
742 INF. */
743
744static int
9cf12d57 745x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
d0722149
DE
746{
747#ifdef __x86_64__
760256f9 748 unsigned int machine;
0bfdf32f 749 int tid = lwpid_of (current_thread);
760256f9
PA
750 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
751
d0722149 752 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
3aee8918 753 if (!is_64bit_tdesc ())
9cf12d57 754 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 755 FIXUP_32);
c92b5177 756 /* No fixup for native x32 GDB. */
760256f9 757 else if (!is_elf64 && sizeof (void *) == 8)
9cf12d57 758 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
c23bbc1c 759 FIXUP_X32);
d0722149
DE
760#endif
761
762 return 0;
763}
764\f
1570b33e
L
765static int use_xml;
766
3aee8918
PA
767/* Format of XSAVE extended state is:
768 struct
769 {
770 fxsave_bytes[0..463]
771 sw_usable_bytes[464..511]
772 xstate_hdr_bytes[512..575]
773 avx_bytes[576..831]
774 future_state etc
775 };
776
777 Same memory layout will be used for the coredump NT_X86_XSTATE
778 representing the XSAVE extended state registers.
779
780 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
781 extended state mask, which is the same as the extended control register
782 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
783 together with the mask saved in the xstate_hdr_bytes to determine what
784 states the processor/OS supports and what state, used or initialized,
785 the process/thread is in. */
786#define I386_LINUX_XSAVE_XCR0_OFFSET 464
787
788/* Does the current host support the GETFPXREGS request? The header
789 file may or may not define it, and even if it is defined, the
790 kernel will return EIO if it's running on a pre-SSE processor. */
791int have_ptrace_getfpxregs =
792#ifdef HAVE_PTRACE_GETFPXREGS
793 -1
794#else
795 0
796#endif
797;
1570b33e 798
3aee8918
PA
/* Get Linux/x86 target description from running target.

   Probes the inferior's word size, FPX and XSAVE support, and XCR0
   mask, caching the probe results in the file-scope variables
   have_ptrace_getfpxregs / have_ptrace_getregset / xcr0, and returns
   the matching target description.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe for PTRACE_GETFPXREGS once (it fails on pre-SSE
     processors); fall back to the plain-x87 description.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  /* Probe for PTRACE_GETREGSET/NT_X86_XSTATE once.  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to the plain-SSE description.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      /* Fall back to the plain-SSE description.  */
      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
922
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    /* Re-read the target description for this process.  */
    low_arch_setup ();
  });

  current_thread = saved_thread;
}
947
948/* Process qSupported query, "xmlRegisters=". Update the buffer size for
949 PTRACE_GETREGSET. */
950
951static void
06e03fff 952x86_linux_process_qsupported (char **features, int count)
1570b33e 953{
06e03fff
PA
954 int i;
955
1570b33e
L
956 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
957 with "i386" in qSupported query, it supports x86 XML target
958 descriptions. */
959 use_xml = 0;
06e03fff 960 for (i = 0; i < count; i++)
1570b33e 961 {
06e03fff 962 const char *feature = features[i];
1570b33e 963
06e03fff 964 if (startswith (feature, "xmlRegisters="))
1570b33e 965 {
06e03fff 966 char *copy = xstrdup (feature + 13);
06e03fff 967
ca3a04f6
CB
968 char *saveptr;
969 for (char *p = strtok_r (copy, ",", &saveptr);
970 p != NULL;
971 p = strtok_r (NULL, ",", &saveptr))
1570b33e 972 {
06e03fff
PA
973 if (strcmp (p, "i386") == 0)
974 {
975 use_xml = 1;
976 break;
977 }
1570b33e 978 }
1570b33e 979
06e03fff
PA
980 free (copy);
981 }
1570b33e 982 }
797bcff5 983 the_x86_target.update_xmltarget ();
1570b33e
L
984}
985
3aee8918 986/* Common for x86/x86-64. */
d0722149 987
3aee8918
PA
988static struct regsets_info x86_regsets_info =
989 {
990 x86_regsets, /* regsets */
991 0, /* num_regsets */
992 NULL, /* disabled_regsets */
993 };
214d508e
L
994
995#ifdef __x86_64__
3aee8918
PA
996static struct regs_info amd64_linux_regs_info =
997 {
998 NULL, /* regset_bitmap */
999 NULL, /* usrregs_info */
1000 &x86_regsets_info
1001 };
d0722149 1002#endif
3aee8918
PA
1003static struct usrregs_info i386_linux_usrregs_info =
1004 {
1005 I386_NUM_REGS,
1006 i386_regmap,
1007 };
d0722149 1008
3aee8918
PA
1009static struct regs_info i386_linux_regs_info =
1010 {
1011 NULL, /* regset_bitmap */
1012 &i386_linux_usrregs_info,
1013 &x86_regsets_info
1014 };
d0722149 1015
aa8d21c9
TBA
1016const regs_info *
1017x86_target::get_regs_info ()
3aee8918
PA
1018{
1019#ifdef __x86_64__
1020 if (is_64bit_tdesc ())
1021 return &amd64_linux_regs_info;
1022 else
1023#endif
1024 return &i386_linux_regs_info;
1025}
d0722149 1026
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1035
82075af2
JS
1036/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1037 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1038
1039static void
4cc32bec 1040x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
82075af2
JS
1041{
1042 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1043
1044 if (use_64bit)
1045 {
1046 long l_sysno;
82075af2
JS
1047
1048 collect_register_by_name (regcache, "orig_rax", &l_sysno);
82075af2 1049 *sysno = (int) l_sysno;
82075af2
JS
1050 }
1051 else
4cc32bec 1052 collect_register_by_name (regcache, "orig_eax", sysno);
82075af2
JS
1053}
1054
219f2f23
PA
/* Tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1060
/* Write the LEN bytes at BUF into the inferior at *TO, and advance
   *TO past the written bytes.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1067
/* Decode OP, a string of whitespace-separated hex byte values
   (e.g. "48 89 e6"), into BUF.  Return the number of bytes
   written.  Decoding stops at the first token that is not a hex
   number (including the terminating NUL).  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* No progress means no more hex tokens.  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1087
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if either required jump displacement does not fit in 32
   bits.  NOTE(review): jump_insn is a file-scope jump template
   defined earlier in this file — presumably e9 + 4-byte rel32;
   confirm against its definition.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The tracepoint address is pushed last so it sits on top of the
     saved register block, acting as the saved $pc.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1278
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  32-bit variant; returns 0 on success, 1
   (with a message in ERR) if no trampoline space is available for
   a 4-byte instruction.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the save sequence above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1466
/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder according to the inferior's tdesc.  All arguments
   are forwarded unchanged.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1504
1505/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1506 architectures. */
1507
1508static int
1509x86_get_min_fast_tracepoint_insn_len (void)
1510{
1511 static int warned_about_fast_tracepoints = 0;
1512
1513#ifdef __x86_64__
1514 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1515 used for fast tracepoints. */
3aee8918 1516 if (is_64bit_tdesc ())
405f8e94
SS
1517 return 5;
1518#endif
1519
58b4daa5 1520 if (agent_loaded_p ())
405f8e94
SS
1521 {
1522 char errbuf[IPA_BUFSIZ];
1523
1524 errbuf[0] = '\0';
1525
1526 /* On x86, if trampolines are available, then 4-byte jump instructions
1527 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1528 with a 4-byte offset are used instead. */
1529 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1530 return 4;
1531 else
1532 {
1533 /* GDB has no channel to explain to user why a shorter fast
1534 tracepoint is not possible, but at least make GDBserver
1535 mention that something has gone awry. */
1536 if (!warned_about_fast_tracepoints)
1537 {
422186a9 1538 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1539 warned_about_fast_tracepoints = 1;
1540 }
1541 return 5;
1542 }
1543 }
1544 else
1545 {
1546 /* Indicate that the minimum length is currently unknown since the IPA
1547 has not loaded yet. */
1548 return 0;
1549 }
fa593d66
PA
1550}
1551
/* Append LEN bytes from START at the current compiled-bytecode
   insertion point and advance the global current_insn_ptr past
   them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1564
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the machine code of INSNS into the inferior.  The inline asm
   brackets INSNS with start_NAME/end_NAME labels and jumps over it,
   so the host never executes it; add_insns copies the bytes between
   the two labels.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* As EMIT_ASM, but assemble INSNS as 32-bit code even in a 64-bit
   gdbserver (.code32/.code64 switch).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1602
#ifdef __x86_64__

/* Emit the bytecode prologue: set up a frame, reserve scratch space,
   and spill the two incoming arguments (%rdi, %rsi) into it.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store %rax through the saved value pointer,
   return 0, and tear down the frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the stack top and add it into %rax.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax from the stack top, result in %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiplication is not implemented; flag a compile error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag a compile error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Signed right shift is not implemented; flag a compile error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Unsigned right shift is not implemented; flag a compile error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit a sign extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1692
/* Emit logical NOT of %rax (result 1 if zero, else 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: pop the stack top and AND it into %rax.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the stack top and OR it into %rax.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the stack top and XOR it into %rax.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of %rax (XOR with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit: %rax = (stack top == %rax) ? 1 : 0, popping the stack.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (stack top < %rax, signed) ? 1 : 0, popping the
   stack.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: %rax = (stack top < %rax, unsigned) ? 1 : 0, popping the
   stack.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte load through the pointer in %rax, result in
   %rax.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1798
/* Emit a conditional jump taken when %rax (popped into %rcx) is
   nonzero.  The 4-byte displacement is left zero; *OFFSET_P/*SIZE_P
   tell the caller where to patch it (byte offset 10, 4 bytes).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jump with a zero displacement, to be patched
   at byte offset 1 (4 bytes).  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch a previously emitted jump at FROM so it targets TO.  SIZE
   must be 4 (32-bit displacement); anything else sets emit_error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
1839
/* Emit code loading the 64-bit constant NUM into %rax
   (movabs $NUM,%rax).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN.  Uses a 5-byte relative call when FN is within
   ±2GB of the emission point, otherwise an indirect call through
   %r10 (call-clobbered, so no save/restore needed).  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1896
/* Emit code fetching raw register REG: load the register number into
   %esi (raw_regs is assumed still in %rdi) and call the IPA's
   get_raw_reg function.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: pop the value stack top into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push %rax onto the value stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit a zero extension of %rax from ARG bits (8, 16 or 32).  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap %rax with the value on the stack top.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code dropping N entries (N*8 bytes) from the value stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  Emit: %edi = ARG1, then
   call FN.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit: %edi = ARG1,
   %rsi = current top of stack (%rax), call FN, preserving %rax
   across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2024
/* The six fused compare-and-branch emitters below all follow the
   same shape: compare the stack top against %rax, fall through when
   the condition does not hold, otherwise pop both operands and take
   a patchable 5-byte jump.  *OFFSET_P/*SIZE_P report where the
   32-bit displacement lives (byte offset 13, 4 bytes).  */

/* Emit branch-if-equal.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Emit branch-if-not-equal.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Emit branch-if-less-than (signed).  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Emit branch-if-less-or-equal (signed).  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Emit branch-if-greater-than (signed).  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Emit branch-if-greater-or-equal (signed).  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2145
/* The bytecode-to-native compiler operations table for 64-bit
   inferiors.  Field order must match struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
2188
/* Emit the 32-bit bytecode prologue: set up a frame and preserve
   %ebx (used below to hold the upper half of 64-bit values).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

/* Emit the 32-bit epilogue: store the %eax:%ebx result pair through
   the value pointer, return 0, and tear down the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2212
/* Emit a 64-bit add of the in-memory stack entry into the cached
   top-of-stack (%ebx:%eax), add with carry across the two 32-bit
   halves, then pop the memory entry.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2221
/* Emit a 64-bit subtract: (next-top) - (top), computed in place in
   memory with borrow, then the result is popped into %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2231
/* 64-bit multiply is not implemented for i386; record a compile
   failure so the caller abandons native compilation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2237
/* 64-bit left shift is not implemented for i386; record a compile
   failure.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2243
/* 64-bit arithmetic right shift is not implemented for i386; record a
   compile failure.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2249
/* 64-bit logical right shift is not implemented for i386; record a
   compile failure.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2255
/* Sign-extend the cached top-of-stack value from ARG bits (8, 16 or
   32) to the full 64-bit %ebx:%eax pair; the high word %ebx is filled
   with copies of the sign bit.  Other widths flag a compile error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2283
/* Emit logical NOT of the 64-bit top of stack: result is 1 when both
   halves are zero, 0 otherwise, left in %ebx:%eax.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2294
/* Emit 64-bit bitwise AND of the two topmost stack entries, popping
   the in-memory operand; result stays cached in %ebx:%eax.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2303
/* Emit 64-bit bitwise OR of the two topmost stack entries, popping
   the in-memory operand; result stays cached in %ebx:%eax.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2312
/* Emit 64-bit bitwise XOR of the two topmost stack entries, popping
   the in-memory operand; result stays cached in %ebx:%eax.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2321
/* Emit 64-bit bitwise NOT of the cached top-of-stack value by XORing
   both halves with all-ones.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2329
/* Emit 64-bit equality test of the two topmost stack entries: compare
   high halves then low halves, leave 1/0 in %ebx:%eax and pop the
   in-memory operand.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2347
/* Emit 64-bit signed "less than" of (next-top) < (top): signed
   compare on the high halves decides unless they are equal, in which
   case the low halves are compared unsigned-style via the same jl.
   Result 1/0 goes to %ebx:%eax; the memory operand is popped.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2366
/* Emit 64-bit unsigned "less than" of (next-top) < (top), using jb
   (below) on both halves; high halves decide unless equal.  Result
   1/0 goes to %ebx:%eax; the memory operand is popped.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2385
/* Dereference the address in %eax as a SIZE-byte (1/2/4/8) memory
   read, leaving the value in %eax (and %ebx for the high word of an
   8-byte read).  NOTE(review): widths other than 1/2/4/8 fall
   through silently without setting emit_error.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2410
/* Emit "branch if top of stack is non-zero": OR the two halves of the
   cached value, pop the next stack entry into the cache, then emit a
   raw jne (0x0f 0x85) with a zero displacement to be patched later.
   *OFFSET_P/*SIZE_P locate the 4-byte displacement.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2428
/* Emit an unconditional jump as a raw 0xe9 with zero displacement;
   the real displacement is patched later via i386_write_goto_address.
   *OFFSET_P/*SIZE_P locate the 4-byte displacement.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2440
2441static void
2442i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2443{
2444 int diff = (to - (from + size));
2445 unsigned char buf[sizeof (int)];
2446
2447 /* We're only doing 4-byte sizes at the moment. */
2448 if (size != 4)
2449 {
2450 emit_error = 1;
2451 return;
2452 }
2453
2454 memcpy (buf, &diff, sizeof (int));
4196ab2a 2455 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2456}
2457
2458static void
4e29fb54 2459i386_emit_const (LONGEST num)
6a271cae
PA
2460{
2461 unsigned char buf[16];
b00ad6ff 2462 int i, hi, lo;
6a271cae
PA
2463 CORE_ADDR buildaddr = current_insn_ptr;
2464
2465 i = 0;
2466 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2467 lo = num & 0xffffffff;
2468 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2469 i += 4;
2470 hi = ((num >> 32) & 0xffffffff);
2471 if (hi)
2472 {
2473 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2474 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2475 i += 4;
2476 }
2477 else
2478 {
2479 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2480 }
2481 append_insns (&buildaddr, i, buf);
2482 current_insn_ptr = buildaddr;
2483}
2484
2485static void
2486i386_emit_call (CORE_ADDR fn)
2487{
2488 unsigned char buf[16];
2489 int i, offset;
2490 CORE_ADDR buildaddr;
2491
2492 buildaddr = current_insn_ptr;
2493 i = 0;
2494 buf[i++] = 0xe8; /* call <reladdr> */
2495 offset = ((int) fn) - (buildaddr + 5);
2496 memcpy (buf + 1, &offset, 4);
2497 append_insns (&buildaddr, 5, buf);
2498 current_insn_ptr = buildaddr;
2499}
2500
/* Emit code pushing the value of raw register REG: reserve stack
   space, pass the regs base (8(%ebp), set up by the prologue) and REG
   to the get_raw_reg helper, and leave the 32-bit result
   zero-extended in %ebx:%eax.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  /* Arguments: raw regs base in (%esp), REG in 4(%esp).  */
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2526
/* Pop the next 64-bit stack entry into the top-of-stack cache
   %ebx:%eax (low word first).  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2534
/* Spill the cached top-of-stack value %ebx:%eax to the machine stack
   (high word pushed first so the low word ends up at (%esp)).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2542
/* Zero-extend the cached top-of-stack value from ARG bits (8, 16 or
   32): mask %eax as needed and clear the high word %ebx.  Other
   widths flag a compile error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2566
/* Exchange the cached top-of-stack (%ebx:%eax) with the next 64-bit
   entry on the machine stack, using %ecx:%edx as scratch.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2578
2579static void
2580i386_emit_stack_adjust (int n)
2581{
2582 unsigned char buf[16];
2583 int i;
2584 CORE_ADDR buildaddr = current_insn_ptr;
2585
2586 i = 0;
2587 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2588 buf[i++] = 0x64;
2589 buf[i++] = 0x24;
2590 buf[i++] = n * 8;
2591 append_insns (&buildaddr, i, buf);
2592 current_insn_ptr = buildaddr;
2593}
2594
/* FN's prototype is `LONGEST(*fn)(int)'. */

/* Emit a call to FN passing the immediate ARG1 on the stack; the
   64-bit return value comes back in %eax:%edx and is moved into the
   top-of-stack cache %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* High half of the LONGEST result is in %edx; cache it in %ebx.  */
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2622
/* FN's prototype is `void(*fn)(int,LONGEST)'. */

/* Emit a call to FN passing the immediate ARG1 as the first argument
   and the current 64-bit top-of-stack value (%ebx:%eax) as the
   second.  The cached top of stack is preserved across the call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2658
6b9801d4 2659
/* Emit a conditional "equal" goto: compare the 64-bit top two stack
   entries, popping both on either path, and jump when they are equal.
   The 0xe9 jmp displacement is patched later; *OFFSET_P/*SIZE_P
   locate it within the emitted sequence.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2684
/* Emit a conditional "not equal" goto: compare the 64-bit top two
   stack entries, popping both on either path, and jump when they
   differ.  The jmp displacement at *OFFSET_P is patched later.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2710
/* Emit a conditional signed "less than" goto for (next-top) < (top):
   high halves compared first, low halves break the tie.  Both
   operands are popped on either path; the jmp displacement at
   *OFFSET_P is patched later.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2736
/* Emit a conditional signed "less than or equal" goto for
   (next-top) <= (top); same two-word compare scheme as
   i386_emit_lt_goto.  The jmp displacement at *OFFSET_P is patched
   later.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2762
/* Emit a conditional signed "greater than" goto for
   (next-top) > (top); same two-word compare scheme as
   i386_emit_lt_goto.  The jmp displacement at *OFFSET_P is patched
   later.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2788
/* Emit a conditional signed "greater than or equal" goto for
   (next-top) >= (top); same two-word compare scheme as
   i386_emit_lt_goto.  The jmp displacement at *OFFSET_P is patched
   later.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    /* Byte offset of the jmp's rel32 operand in the sequence above.  */
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2814
/* Vector of emitter callbacks used to compile agent-expression
   bytecodes to native i386 code.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2855
2856
2857static struct emit_ops *
2858x86_emit_ops (void)
2859{
2860#ifdef __x86_64__
3aee8918 2861 if (is_64bit_tdesc ())
6a271cae
PA
2862 return &amd64_emit_ops;
2863 else
2864#endif
2865 return &i386_emit_ops;
2866}
2867
/* Implementation of target ops method "sw_breakpoint_from_kind". */

/* x86 has a single breakpoint kind; ignore KIND and return the int3
   sequence, storing its length in *SIZE.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2876
c2d6af84
PA
/* x86 always supports the vCont "r" (range stepping) action.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2882
7d00775e
AT
/* Implementation of linux_target_ops method "supports_hardware_single_step".
 */

/* x86 always has hardware single-step (the trap flag).  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2891
ae91f625
MK
/* Implementation of linux_target_ops method "get_ipa_tdesc_idx":
   return the index identifying the current thread's target
   description to the in-process agent.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  /* Second argument 0: presumably "don't fetch registers", since only
     the tdesc pointer is needed here -- TODO confirm against
     get_thread_regcache.  */
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  /* On 64-bit builds amd64_get_ipa_tdesc_idx handles every tdesc; the
     i386 fallbacks below are compiled out of the control flow.  */
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2907
d0722149
DE
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets. */

/* The linux_target_ops callback vector for x86.  Entry order must
   match the struct declaration in linux-low.h.  */

struct linux_target_ops the_low_target =
{
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2942
ef0478f6
TBA
/* The linux target ops object. */

linux_process_target *the_linux_target = &the_x86_target;
2946
3aee8918
PA
/* One-time arch setup: build the fallback "no XML" target
   descriptions (SSE feature set) used when the debugger side cannot
   consume XML tdescs, and initialize the x86 regsets info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.228955 seconds and 4 git commands to generate.