gdbserver/linux-low: turn 'supports_z_point_type' into a method
gdbserver/linux-x86-low.cc

/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update the target descriptions of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1				  /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

   Same memory layout will be used for the coredump NT_X86_XSTATE
   representing the XSAVE extended state registers.

   The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
   extended state mask, which is the same as the extended control register
   0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
   together with the mask saved in the xstate_hdr_bytes to determine what
   states the processor/OS supports and what state, used or initialized,
   the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

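/* Editor's illustrative sketch, not part of the original source: how
   the mask at I386_LINUX_XSAVE_XCR0_OFFSET would be pulled out of a
   raw XSAVE buffer.  The helper name and caller-supplied buffer are
   hypothetical; the real extraction happens inline in
   x86_linux_read_description below.  */
#if 0
static uint64_t
example_read_xcr0 (const unsigned char *xsave_buf)
{
  uint64_t xcr0;

  /* The OS-enabled feature mask sits in the sw_usable_bytes area.  */
  memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET, sizeof (xcr0));
  return xcr0;
}
#endif
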
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target descriptions of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  the_x86_target.update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

/* Fill *SYSNO with the number of the syscall that was trapped.  This
   should only be called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
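
/* For example, push_opcode (buf, "48 89 e6") parses the string with
   strtoul and stores the three bytes 0x48 0x89 0xe6 ("mov %rsp,%rsi")
   into BUF, returning 3.  (Editor's illustration of the helper above;
   the hex strings in the jump-pad builders below are consumed exactly
   this way.)  */
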
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":"); \
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
	       "\t" "jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":\n" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":\n" \
	       ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
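
/* As an illustration (editor's note, not in the original source):
   EMIT_ASM (amd64_pop, "pop %rax") below assembles the "pop %rax"
   instruction into the enclosing function between the start_amd64_pop
   and end_amd64_pop labels, and add_insns then copies that single
   byte (0x58) into the compiled bytecode at current_insn_ptr.  */
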
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

1899static void
1900amd64_emit_reg (int reg)
1901{
1902 unsigned char buf[16];
1903 int i;
1904 CORE_ADDR buildaddr;
1905
1906 /* Assume raw_regs is still in %rdi. */
1907 buildaddr = current_insn_ptr;
1908 i = 0;
1909 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1910 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1911 i += 4;
1912 append_insns (&buildaddr, i, buf);
1913 current_insn_ptr = buildaddr;
1914 amd64_emit_call (get_raw_reg_func_addr ());
1915}
1916
1917static void
1918amd64_emit_pop (void)
1919{
1920 EMIT_ASM (amd64_pop,
1921 "pop %rax");
1922}
1923
1924static void
1925amd64_emit_stack_flush (void)
1926{
1927 EMIT_ASM (amd64_stack_flush,
1928 "push %rax");
1929}
1930
1931static void
1932amd64_emit_zero_ext (int arg)
1933{
1934 switch (arg)
1935 {
1936 case 8:
1937 EMIT_ASM (amd64_zero_ext_8,
1938 "and $0xff,%rax");
1939 break;
1940 case 16:
1941 EMIT_ASM (amd64_zero_ext_16,
1942 "and $0xffff,%rax");
1943 break;
1944 case 32:
1945 EMIT_ASM (amd64_zero_ext_32,
1946 "mov $0xffffffff,%rcx\n\t"
1947 "and %rcx,%rax");
1948 break;
1949 default:
1950 emit_error = 1;
1951 }
1952}
1953
1954static void
1955amd64_emit_swap (void)
1956{
1957 EMIT_ASM (amd64_swap,
1958 "mov %rax,%rcx\n\t"
1959 "pop %rax\n\t"
1960 "push %rcx");
1961}
1962
1963static void
1964amd64_emit_stack_adjust (int n)
1965{
1966 unsigned char buf[16];
1967 int i;
1968 CORE_ADDR buildaddr = current_insn_ptr;
1969
1970 i = 0;
1971 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1972 buf[i++] = 0x8d;
1973 buf[i++] = 0x64;
1974 buf[i++] = 0x24;
1975 /* This only handles adjustments up to 16, but we don't expect any more. */
1976 buf[i++] = n * 8;
1977 append_insns (&buildaddr, i, buf);
1978 current_insn_ptr = buildaddr;
1979}
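
/* For reference, n == 2 would emit 48 8d 64 24 10, i.e.
   "lea 0x10(%rsp),%rsp": bytecode stack slots are 8 bytes wide on
   amd64, hence the n * 8 displacement.  */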

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
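
/* Both call helpers above follow the SysV amd64 calling convention:
   the literal ARG1 is placed in %edi (first integer argument), and the
   two-argument form additionally copies the bytecode stack top from
   %rax into %rsi (second integer argument) before the call.  */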

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
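
/* Note on the 32-bit convention used from here on: a 64-bit bytecode
   value lives in the register pair %eax (low half) / %ebx (high half),
   which is why the epilogue above stores both halves through the value
   pointer at 12(%ebp), and why the binary operations below touch both
   registers.  */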

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* Be sure that this matches the sequence above.  */
  if (size_p)
    *size_p = 4;
}
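
/* For reference, the if_goto sequence above assembles to:

     89 c1             mov %eax,%ecx
     09 d9             or %ebx,%ecx
     58                pop %eax
     5b                pop %ebx
     83 f9 00          cmpl $0x0,%ecx
     0f 85 xx xx xx xx jne <rel32>

   2 + 2 + 1 + 1 + 3 + 2 = 11 bytes precede the 4-byte displacement,
   matching the *OFFSET_P = 11 reported to the patcher.  */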

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };


static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
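
/* A usage sketch (hypothetical caller, and assuming the emit_ops field
   names mirror the function names used in the positional initializers
   above): the agent-expression compiler fetches the table once and
   drives it per bytecode opcode, e.g.

     struct emit_ops *ops = x86_emit_ops ();
     ops->emit_prologue ();
     ...one ops->emit_* call per agent bytecode...
     ops->emit_epilogue ();

   so a single compiler core serves both the i386 and amd64 ABIs.  */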

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* Need to fix up i386 siginfo if host is amd64.  */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}