gdbserver/linux-low: turn 'supports_tracepoints' into a method
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
50#ifdef __x86_64__
51static struct target_desc *tdesc_amd64_linux_no_xml;
52#endif
53static struct target_desc *tdesc_i386_linux_no_xml;
54
1570b33e 55
fa593d66 56static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 57static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 58
1570b33e
L
59/* Backward compatibility for gdb without XML support. */
60
61static const char *xmltarget_i386_linux_no_xml = "@<target>\
62<architecture>i386</architecture>\
63<osabi>GNU/Linux</osabi>\
64</target>";
f6d1620c
L
65
66#ifdef __x86_64__
1570b33e
L
67static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68<architecture>i386:x86-64</architecture>\
69<osabi>GNU/Linux</osabi>\
70</target>";
f6d1620c 71#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
77#ifndef PTRACE_GET_THREAD_AREA
78#define PTRACE_GET_THREAD_AREA 25
79#endif
80
81/* This definition comes from prctl.h, but some kernels may not have it. */
82#ifndef PTRACE_ARCH_PRCTL
83#define PTRACE_ARCH_PRCTL 30
84#endif
85
86/* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88#ifndef ARCH_GET_FS
89#define ARCH_SET_GS 0x1001
90#define ARCH_SET_FS 0x1002
91#define ARCH_GET_FS 0x1003
92#define ARCH_GET_GS 0x1004
93#endif
94
ef0478f6
TBA
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  /* Hardware breakpoint/watchpoint support, backed by the x86 debug
     registers (see nat/x86-linux-dregs.h).  */

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  /* Per-process / per-thread arch state lifecycle hooks; these mostly
     delegate to the shared code in nat/.  */

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};
167
168/* The singleton target ops object. */
169
170static x86_target the_x86_target;
171
aa5ca48f
DE
172/* Per-process arch-specific data we want to keep. */
173
174struct arch_process_info
175{
df7e5265 176 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
177};
178
d0722149
DE
179#ifdef __x86_64__
180
181/* Mapping between the general-purpose registers in `struct user'
182 format and GDB's register array layout.
183 Note that the transfer layout uses 64-bit regs. */
184static /*const*/ int i386_regmap[] =
185{
186 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
187 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
188 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
189 DS * 8, ES * 8, FS * 8, GS * 8
190};
191
192#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
193
194/* So code below doesn't have to care, i386 or amd64. */
195#define ORIG_EAX ORIG_RAX
bc9540e8 196#define REGSIZE 8
d0722149
DE
197
198static const int x86_64_regmap[] =
199{
200 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
201 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
202 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
203 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
204 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
205 DS * 8, ES * 8, FS * 8, GS * 8,
206 -1, -1, -1, -1, -1, -1, -1, -1,
207 -1, -1, -1, -1, -1, -1, -1, -1,
208 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
209 -1,
210 -1, -1, -1, -1, -1, -1, -1, -1,
211 ORIG_RAX * 8,
2735833d
WT
212#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
213 21 * 8, 22 * 8,
214#else
215 -1, -1,
216#endif
a196ebeb 217 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
218 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
219 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
220 -1, -1, -1, -1, -1, -1, -1, -1,
221 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
222 -1, -1, -1, -1, -1, -1, -1, -1,
223 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
224 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
225 -1, -1, -1, -1, -1, -1, -1, -1,
226 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
227 -1, -1, -1, -1, -1, -1, -1, -1,
228 -1 /* pkru */
d0722149
DE
229};
230
231#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 232#define X86_64_USER_REGS (GS + 1)
d0722149
DE
233
234#else /* ! __x86_64__ */
235
236/* Mapping between the general-purpose registers in `struct user'
237 format and GDB's register array layout. */
238static /*const*/ int i386_regmap[] =
239{
240 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
241 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
242 EIP * 4, EFL * 4, CS * 4, SS * 4,
243 DS * 4, ES * 4, FS * 4, GS * 4
244};
245
246#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
247
bc9540e8
PA
248#define REGSIZE 4
249
d0722149 250#endif
3aee8918
PA
251
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  /* Register 0 is 8 bytes wide exactly when the thread's tdesc is a
     64-bit one.  */
  return register_size (regcache->tdesc, 0) == 8;
}

#endif
266
d0722149
DE
267\f
/* Called by libthread_db.  Fetch the thread area (TLS) base address of
   thread LWPID into *BASE.  IDX is the FS/GS register number on 64-bit
   inferiors, or a GDT selector-derived index on 32-bit ones.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* For a 64-bit inferior, read the segment base directly via
	 PTRACE_ARCH_PRCTL.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit path: PTRACE_GET_THREAD_AREA yields a user_desc-like
       descriptor; desc[1] is its base address field.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
308
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Returns 0
   on success, -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      /* 64-bit inferior: the FS base is the per-thread address.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    /* 32-bit inferior: derive the GDT index from the GS selector and
       fetch the segment descriptor; desc[1] is its base address.  */
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
355
356
d0722149 357\f
daca57a7
TBA
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  /* On a 64-bit tdesc all registers are accessible via regsets.  */
  if (is_64bit_tdesc ())
    return false;
#endif

  /* usrregs can only reach the i386 general register set.  */
  return regno >= I386_NUM_REGS;
}
368
daca57a7
TBA
bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  /* On a 64-bit tdesc all registers are accessible via regsets.  */
  if (is_64bit_tdesc ())
    return false;
#endif

  /* usrregs can only reach the i386 general register set.  */
  return regno >= I386_NUM_REGS;
}
379
/* Collect the general registers from REGCACHE into the ptrace
   general-register buffer BUF (layout per i386_regmap /
   x86_64_regmap).  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not part of the regs struct here; write
	   them to the inferior directly via PTRACE_ARCH_PRCTL.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
434
/* Supply the general registers from the ptrace general-register
   buffer BUF into REGCACHE (inverse of x86_fill_gregset).  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are not part of the regs struct here; read
	   them from the inferior via PTRACE_ARCH_PRCTL.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
469
/* Collect the FP registers from REGCACHE into BUF, using fxsave
   layout on 64-bit hosts and fsave layout on 32-bit hosts.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
479
/* Supply the FP registers from BUF into REGCACHE (inverse of
   x86_fill_fpregset).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
489
#ifndef __x86_64__

/* Collect the extended FP (fxsave) registers from REGCACHE into BUF.
   Only compiled for 32-bit hosts (see the #ifndef above).  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Supply the extended FP (fxsave) registers from BUF into REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
505
1570b33e
L
/* Collect the registers from REGCACHE into the XSAVE buffer BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Supply the registers from the XSAVE buffer BUF into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
517
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* The size 0 here is a placeholder; x86_linux_read_description
     updates it to X86_XSTATE_SIZE (xcr0) once PTRACE_GETREGSET is
     known to work.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
546
bf9ae9d8
TBA
bool
x86_target::low_supports_breakpoints ()
{
  /* low_get_pc / low_set_pc below are implemented.  */
  return true;
}
552
553CORE_ADDR
554x86_target::low_get_pc (regcache *regcache)
d0722149 555{
3aee8918 556 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
557
558 if (use_64bit)
559 {
6598661d
PA
560 uint64_t pc;
561
442ea881 562 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
563 return (CORE_ADDR) pc;
564 }
565 else
566 {
6598661d
PA
567 uint32_t pc;
568
442ea881 569 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
570 return (CORE_ADDR) pc;
571 }
572}
573
bf9ae9d8
TBA
574void
575x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 576{
3aee8918 577 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
578
579 if (use_64bit)
580 {
6598661d
PA
581 uint64_t newpc = pc;
582
442ea881 583 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
584 }
585 else
586 {
6598661d
PA
587 uint32_t newpc = pc;
588
442ea881 589 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
590 }
591}
d4807ea2
TBA
592
int
x86_target::low_decr_pc_after_break ()
{
  /* The PC must be backed up by the breakpoint length after an int3
     trap; x86 software breakpoints are 1 byte (see x86_breakpoint).  */
  return 1;
}
598
d0722149 599\f
dd373349 600static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
601#define x86_breakpoint_len 1
602
d7146cda
TBA
603bool
604x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
605{
606 unsigned char c;
607
d7146cda 608 read_memory (pc, &c, 1);
d0722149 609 if (c == 0xCC)
d7146cda 610 return true;
d0722149 611
d7146cda 612 return false;
d0722149
DE
613}
614\f
42995dbd 615/* Low-level function vector. */
df7e5265 616struct x86_dr_low_type x86_dr_low =
42995dbd 617 {
d33472ad
GB
618 x86_linux_dr_set_control,
619 x86_linux_dr_set_addr,
620 x86_linux_dr_get_addr,
621 x86_linux_dr_get_status,
622 x86_linux_dr_get_control,
42995dbd
GB
623 sizeof (void *),
624 };
aa5ca48f 625\f
90d74c30 626/* Breakpoint/Watchpoint support. */
aa5ca48f 627
007c9b97
TBA
628bool
629x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
630{
631 switch (z_type)
632 {
633 case Z_PACKET_SW_BP:
634 case Z_PACKET_HW_BP:
635 case Z_PACKET_WRITE_WP:
636 case Z_PACKET_ACCESS_WP:
007c9b97 637 return true;
802e8e6d 638 default:
007c9b97 639 return false;
802e8e6d
PA
640 }
641}
642
9db9aa23
TBA
/* Insert a hardware breakpoint or watchpoint of TYPE at ADDR covering
   SIZE bytes, via the process's debug-register mirror.  Returns 0 on
   success, non-zero otherwise (1 for an unsupported TYPE).  */

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
668
9db9aa23
TBA
/* Remove a hardware breakpoint or watchpoint previously inserted by
   low_insert_point.  Returns 0 on success, non-zero otherwise (1 for
   an unsupported TYPE).  */

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
693
ac1bbaca
TBA
694bool
695x86_target::low_stopped_by_watchpoint ()
aa5ca48f
DE
696{
697 struct process_info *proc = current_process ();
fe978cb0 698 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
699}
700
ac1bbaca
TBA
701CORE_ADDR
702x86_target::low_stopped_data_address ()
aa5ca48f
DE
703{
704 struct process_info *proc = current_process ();
705 CORE_ADDR addr;
fe978cb0 706 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 707 &addr))
aa5ca48f
DE
708 return addr;
709 return 0;
710}
711\f
712/* Called when a new process is created. */
713
fd000fb3
TBA
714arch_process_info *
715x86_target::low_new_process ()
aa5ca48f 716{
ed859da7 717 struct arch_process_info *info = XCNEW (struct arch_process_info);
aa5ca48f 718
df7e5265 719 x86_low_init_dregs (&info->debug_reg_state);
aa5ca48f
DE
720
721 return info;
722}
723
04ec7890
SM
/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  /* INFO was allocated with XCNEW in low_new_process.  */
  xfree (info);
}
731
fd000fb3
TBA
/* Called when a new thread is attached.  */

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}
3a8a0396 738
fd000fb3
TBA
/* Called when a thread is being deleted.  */

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
745
/* Target routine for new_fork.  Copies PARENT's debug-register mirror
   into CHILD's.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
773
d7599cc0
TBA
/* Called just before LWP is resumed, e.g. to sync debug registers.  */

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
780
70a0bb6b
GB
/* See nat/x86-dregs.h.  Return the debug-register mirror of the
   process with id PID.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
aa5ca48f 790\f
d0722149
DE
791/* When GDBSERVER is built as a 64-bit application on linux, the
792 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
793 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
794 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
795 conversion in-place ourselves. */
796
9cf12d57 797/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
798 layout of the inferiors' architecture. Returns true if any
799 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 800 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
801 INF. */
802
cb63de7c
TBA
/* Convert a siginfo object between the ptrace/host layout (PTRACE)
   and the inferior's layout (INF); DIRECTION 1 copies INF -> PTRACE,
   0 copies PTRACE -> INF.  Returns true if a conversion was done.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}
823\f
1570b33e
L
824static int use_xml;
825
3aee8918
PA
826/* Format of XSAVE extended state is:
827 struct
828 {
829 fxsave_bytes[0..463]
830 sw_usable_bytes[464..511]
831 xstate_hdr_bytes[512..575]
832 avx_bytes[576..831]
833 future_state etc
834 };
835
836 Same memory layout will be used for the coredump NT_X86_XSTATE
837 representing the XSAVE extended state registers.
838
839 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
840 extended state mask, which is the same as the extended control register
841 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
842 together with the mask saved in the xstate_hdr_bytes to determine what
843 states the processor/OS supports and what state, used or initialized,
844 the process/thread is in. */
845#define I386_LINUX_XSAVE_XCR0_OFFSET 464
846
847/* Does the current host support the GETFPXREGS request? The header
848 file may or may not define it, and even if it is defined, the
849 kernel will return EIO if it's running on a pre-SSE processor. */
850int have_ptrace_getfpxregs =
851#ifdef HAVE_PTRACE_GETFPXREGS
852 -1
853#else
854 0
855#endif
856;
1570b33e 857
3aee8918
PA
/* Get Linux/x86 target description from running target.  Probes the
   inferior's ELF class, the availability of PTRACE_GETREGSET, and the
   XCR0 feature mask, then selects (and caches, via the have_ptrace_*
   globals) the matching tdesc.  Also fixes up the XSTATE regset size
   in x86_regsets once XCR0 is known.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* A 32-bit gdbserver cannot debug a 64-bit inferior.  */
  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* Probe PTRACE_GETFPXREGS once (have_ptrace_getfpxregs starts as -1
     meaning "unknown"); pre-SSE processors fail it at runtime.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to a plain SSE description if the XCR0-based lookup
	 yielded nothing.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
981
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    /* Re-select the tdesc for this process.  */
    low_arch_setup ();
  });

  current_thread = saved_thread;
}
1006
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  FEATURES holds COUNT feature strings from GDB's
   qSupported packet.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  /* Parse the comma-separated architecture list following the
	     13-character "xmlRegisters=" prefix.  */
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  /* Re-select tdescs now that we know whether GDB groks XML.  */
  update_xmltarget ();
}
1044
/* Common for x86/x86-64.  */

/* Regset table shared by the 32-bit and 64-bit flavors; num_regsets
   is computed at initialization time.  */
static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* 64-bit inferiors are accessed through regsets only (no USER-area
   register map).  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
/* USER-area register map for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

/* 32-bit inferiors use both the USER-area map and the regsets.  */
static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
d0722149 1074
aa8d21c9
TBA
1075const regs_info *
1076x86_target::get_regs_info ()
3aee8918
PA
1077{
1078#ifdef __x86_64__
1079 if (is_64bit_tdesc ())
1080 return &amd64_linux_regs_info;
1081 else
1082#endif
1083 return &i386_linux_regs_info;
1084}
d0722149 1085
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  /* Probe the inferior and cache the matching tdesc on the current
     process.  */
  current_process ()->tdesc = x86_linux_read_description ();
}
1094
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  /* Register 0 is 8 bytes wide exactly for 64-bit tdescs.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* orig_rax preserves the syscall number at syscall entry.  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1113
/* The x86 target supports tracepoints unconditionally.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1119
fa593d66
PA
1120static void
1121append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1122{
4196ab2a 1123 target_write_memory (*to, buf, len);
fa593d66
PA
1124 *to += len;
1125}
1126
/* Decode OP, a string of space-separated hex byte values (e.g.
   "48 83 ec 18"), into BUF.  Return the number of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *dst = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* Stop at the first position where no hex digits were consumed
	 (end of string or malformed input).  */
      if (end == op)
	break;

      *dst++ = byte;
      op = end;
    }

  return dst - buf;
}
1146
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success; on failure returns 1
   and stores an "E."-prefixed error string in ERR.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The tracepoint address is pushed last so it sits on top of the
     saved-register block as the $pc value.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  /* The displacement must fit in the jump instruction's 32-bit
     field; fail gracefully if the pad landed too far away.  */
  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1337
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success; on failure returns 1
   and stores an "E."-prefixed error string in ERR.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state (mirror image of the save sequence).  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* The instruction being replaced is only 4 bytes, too small for
	 a 5-byte 32-bit jump; go through a trampoline instead.  */
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1525
/* Install a fast tracepoint jump pad, dispatching to the 64-bit or
   32-bit builder according to the inferior's target description.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1563
1564/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1565 architectures. */
1566
1567static int
1568x86_get_min_fast_tracepoint_insn_len (void)
1569{
1570 static int warned_about_fast_tracepoints = 0;
1571
1572#ifdef __x86_64__
1573 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1574 used for fast tracepoints. */
3aee8918 1575 if (is_64bit_tdesc ())
405f8e94
SS
1576 return 5;
1577#endif
1578
58b4daa5 1579 if (agent_loaded_p ())
405f8e94
SS
1580 {
1581 char errbuf[IPA_BUFSIZ];
1582
1583 errbuf[0] = '\0';
1584
1585 /* On x86, if trampolines are available, then 4-byte jump instructions
1586 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1587 with a 4-byte offset are used instead. */
1588 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1589 return 4;
1590 else
1591 {
1592 /* GDB has no channel to explain to user why a shorter fast
1593 tracepoint is not possible, but at least make GDBserver
1594 mention that something has gone awry. */
1595 if (!warned_about_fast_tracepoints)
1596 {
422186a9 1597 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1598 warned_about_fast_tracepoints = 1;
1599 }
1600 return 5;
1601 }
1602 }
1603 else
1604 {
1605 /* Indicate that the minimum length is currently unknown since the IPA
1606 has not loaded yet. */
1607 return 0;
1608 }
fa593d66
PA
1609}
1610
6a271cae
PA
1611static void
1612add_insns (unsigned char *start, int len)
1613{
1614 CORE_ADDR buildaddr = current_insn_ptr;
1615
1616 if (debug_threads)
87ce2a04
DE
1617 debug_printf ("Adding %d bytes of insn at %s\n",
1618 len, paddress (buildaddr));
6a271cae
PA
1619
1620 append_insns (&buildaddr, len, start);
1621 current_insn_ptr = buildaddr;
1622}
1623
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Emit the machine code for INSNS into the jump pad.  The asm is
   bracketed by start_/end_ labels so add_insns can compute its length
   at run time; the leading jmp keeps the host from executing it.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Like EMIT_ASM, but assemble INSNS as 32-bit code even in a 64-bit
   gdbserver (for 32-bit inferiors).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1661
#ifdef __x86_64__

/* Emit the bytecode-function prologue: save %rbp, reserve scratch
   space, and stash the two incoming arguments.  */
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store %rax through the saved second argument,
   return 0.  */
static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the next stack entry and add it into %rax.  */
static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract %rax from the popped stack entry.  */
static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiplication is not implemented; flag an emit error.  */
static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; flag an emit error.  */
static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; flag an emit error.  */
static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; flag an emit error.  */
static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit sign extension of %rax from ARG bits (8/16/32) to 64 bits.  */
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit logical NOT: %rax becomes 1 if it was zero, else 0.  */
static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: pop the next stack entry and AND it into %rax.  */
static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack entry and OR it into %rax.  */
static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack entry and XOR it into %rax.  */
static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of %rax.  */
static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit equality comparison of %rax with the popped stack entry;
   %rax becomes 1 on equality, else 0.  */
static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed less-than of the popped stack entry against %rax.  */
static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit unsigned less-than of the popped stack entry against %rax.  */
static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte memory load through the address in %rax.  */
static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1857
/* Emit a conditional goto taken when %rax (popped result) is nonzero.
   *OFFSET_P/*SIZE_P report where the 4-byte jump displacement lives
   within the emitted sequence so the caller can patch it later.  */
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional goto with a patchable 4-byte displacement.  */
static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte displacement at FROM to jump to TO.  Only
   4-byte displacements are supported.  */
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

/* Emit code to load the 64-bit constant NUM into %rax.  */
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using an absolute indirect call through %r10
   when FN is out of 32-bit relative range.  */
static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit code to fetch raw register REG via the get_raw_reg helper.  */
static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: pop the value stack into %rax.  */
static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push %rax onto the value stack.  */
static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit zero extension of %rax from ARG bits (8/16/32).  */
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap %rax with the top of the value stack.  */
static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code to drop N entries from the value stack.  */
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2083
df4a0200 2084static void
6b9801d4
SS
2085amd64_emit_eq_goto (int *offset_p, int *size_p)
2086{
2087 EMIT_ASM (amd64_eq,
2088 "cmp %rax,(%rsp)\n\t"
2089 "jne .Lamd64_eq_fallthru\n\t"
2090 "lea 0x8(%rsp),%rsp\n\t"
2091 "pop %rax\n\t"
2092 /* jmp, but don't trust the assembler to choose the right jump */
2093 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2094 ".Lamd64_eq_fallthru:\n\t"
2095 "lea 0x8(%rsp),%rsp\n\t"
2096 "pop %rax");
2097
2098 if (offset_p)
2099 *offset_p = 13;
2100 if (size_p)
2101 *size_p = 4;
2102}
2103
df4a0200 2104static void
6b9801d4
SS
2105amd64_emit_ne_goto (int *offset_p, int *size_p)
2106{
2107 EMIT_ASM (amd64_ne,
2108 "cmp %rax,(%rsp)\n\t"
2109 "je .Lamd64_ne_fallthru\n\t"
2110 "lea 0x8(%rsp),%rsp\n\t"
2111 "pop %rax\n\t"
2112 /* jmp, but don't trust the assembler to choose the right jump */
2113 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2114 ".Lamd64_ne_fallthru:\n\t"
2115 "lea 0x8(%rsp),%rsp\n\t"
2116 "pop %rax");
2117
2118 if (offset_p)
2119 *offset_p = 13;
2120 if (size_p)
2121 *size_p = 4;
2122}
2123
df4a0200 2124static void
6b9801d4
SS
2125amd64_emit_lt_goto (int *offset_p, int *size_p)
2126{
2127 EMIT_ASM (amd64_lt,
2128 "cmp %rax,(%rsp)\n\t"
2129 "jnl .Lamd64_lt_fallthru\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2131 "pop %rax\n\t"
2132 /* jmp, but don't trust the assembler to choose the right jump */
2133 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2134 ".Lamd64_lt_fallthru:\n\t"
2135 "lea 0x8(%rsp),%rsp\n\t"
2136 "pop %rax");
2137
2138 if (offset_p)
2139 *offset_p = 13;
2140 if (size_p)
2141 *size_p = 4;
2142}
2143
df4a0200 2144static void
6b9801d4
SS
2145amd64_emit_le_goto (int *offset_p, int *size_p)
2146{
2147 EMIT_ASM (amd64_le,
2148 "cmp %rax,(%rsp)\n\t"
2149 "jnle .Lamd64_le_fallthru\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2151 "pop %rax\n\t"
2152 /* jmp, but don't trust the assembler to choose the right jump */
2153 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2154 ".Lamd64_le_fallthru:\n\t"
2155 "lea 0x8(%rsp),%rsp\n\t"
2156 "pop %rax");
2157
2158 if (offset_p)
2159 *offset_p = 13;
2160 if (size_p)
2161 *size_p = 4;
2162}
2163
df4a0200 2164static void
6b9801d4
SS
2165amd64_emit_gt_goto (int *offset_p, int *size_p)
2166{
2167 EMIT_ASM (amd64_gt,
2168 "cmp %rax,(%rsp)\n\t"
2169 "jng .Lamd64_gt_fallthru\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2171 "pop %rax\n\t"
2172 /* jmp, but don't trust the assembler to choose the right jump */
2173 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2174 ".Lamd64_gt_fallthru:\n\t"
2175 "lea 0x8(%rsp),%rsp\n\t"
2176 "pop %rax");
2177
2178 if (offset_p)
2179 *offset_p = 13;
2180 if (size_p)
2181 *size_p = 4;
2182}
2183
df4a0200 2184static void
6b9801d4
SS
2185amd64_emit_ge_goto (int *offset_p, int *size_p)
2186{
2187 EMIT_ASM (amd64_ge,
2188 "cmp %rax,(%rsp)\n\t"
2189 "jnge .Lamd64_ge_fallthru\n\t"
2190 ".Lamd64_ge_jump:\n\t"
2191 "lea 0x8(%rsp),%rsp\n\t"
2192 "pop %rax\n\t"
2193 /* jmp, but don't trust the assembler to choose the right jump */
2194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2195 ".Lamd64_ge_fallthru:\n\t"
2196 "lea 0x8(%rsp),%rsp\n\t"
2197 "pop %rax");
2198
2199 if (offset_p)
2200 *offset_p = 13;
2201 if (size_p)
2202 *size_p = 4;
2203}
2204
/* Emit-callback table used when compiling agent-expression bytecode to
   native code for a 64-bit inferior.  Entries are positional and must
   stay in the field order of struct emit_ops.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2245
2246#endif /* __x86_64__ */
2247
/* Emit the i386 prologue for a compiled agent expression: set up a
   frame and preserve %ebx, which the emitted code uses as the high
   half of the 64-bit cached top-of-stack (%ebx:%eax).  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2258
/* Emit the i386 epilogue: store the 64-bit result (%ebx:%eax) through
   the value pointer passed at 12(%ebp), return 0 in %eax, and restore
   the saved %ebx and frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2271
/* Emit a 64-bit add: add the memory-stack entry to the cached top
   (%ebx:%eax) with carry propagation, and pop the memory operand.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2280
/* Emit a 64-bit subtract: compute (memory-stack entry) - (%ebx:%eax)
   in place with borrow propagation, then pop the result into
   %ebx:%eax as the new cached top.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2290
/* 64-bit multiply is not implemented for i386; report failure through
   the file-global emit_error flag.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2296
/* 64-bit left shift is not implemented for i386; report failure
   through emit_error.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2302
/* 64-bit arithmetic right shift is not implemented for i386; report
   failure through emit_error.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2308
/* 64-bit logical right shift is not implemented for i386; report
   failure through emit_error.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2314
/* Emit code to sign-extend the low ARG bits of the cached top into a
   full 64-bit value in %ebx:%eax (%ebx becomes the sign fill).  Only
   8, 16 and 32 are supported; anything else sets emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2342
/* Emit logical NOT: replace the 64-bit cached top with 1 if it was
   zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2353
/* Emit 64-bit bitwise AND of the memory-stack entry into %ebx:%eax,
   popping the memory operand.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2362
/* Emit 64-bit bitwise OR of the memory-stack entry into %ebx:%eax,
   popping the memory operand.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2371
/* Emit 64-bit bitwise XOR of the memory-stack entry into %ebx:%eax,
   popping the memory operand.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2380
/* Emit 64-bit bitwise complement of the cached top %ebx:%eax.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2388
/* Emit 64-bit equality test: compare the memory-stack entry with
   %ebx:%eax, pop the memory operand, and leave 1 in %ebx:%eax when
   equal, 0 otherwise.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2406
/* Emit signed 64-bit "less than": leave 1 in %ebx:%eax when the
   memory-stack entry is signed-less-than %ebx:%eax (comparing high
   halves first), 0 otherwise; pop the memory operand.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2425
/* Emit unsigned 64-bit "less than": like i386_emit_less_signed but
   using unsigned (below) conditions.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2444
/* Emit a memory dereference: replace the address in %eax with the
   SIZE-byte value it points to.  Only the sub-register covering SIZE
   bytes is written; the high half %ebx is loaded only for SIZE == 8.
   Unlisted sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2469
/* Emit a conditional branch: pop the 64-bit top of stack and branch
   when it is non-zero.  The jne rel32 (0x0f 0x85) displacement is
   patched later; its position is reported via *OFFSET_P/*SIZE_P.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2487
/* Emit an unconditional branch (0xe9 jmp rel32) with a zero
   displacement to be patched later; the displacement starts 1 byte in
   and is 4 bytes wide.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2499
/* Patch a previously emitted branch: write the displacement from the
   end of the SIZE-byte field at FROM to the target TO.  Only 4-byte
   displacements are supported; otherwise flag emit_error.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
2516
2517static void
4e29fb54 2518i386_emit_const (LONGEST num)
6a271cae
PA
2519{
2520 unsigned char buf[16];
b00ad6ff 2521 int i, hi, lo;
6a271cae
PA
2522 CORE_ADDR buildaddr = current_insn_ptr;
2523
2524 i = 0;
2525 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2526 lo = num & 0xffffffff;
2527 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2528 i += 4;
2529 hi = ((num >> 32) & 0xffffffff);
2530 if (hi)
2531 {
2532 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2533 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2534 i += 4;
2535 }
2536 else
2537 {
2538 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2539 }
2540 append_insns (&buildaddr, i, buf);
2541 current_insn_ptr = buildaddr;
2542}
2543
2544static void
2545i386_emit_call (CORE_ADDR fn)
2546{
2547 unsigned char buf[16];
2548 int i, offset;
2549 CORE_ADDR buildaddr;
2550
2551 buildaddr = current_insn_ptr;
2552 i = 0;
2553 buf[i++] = 0xe8; /* call <reladdr> */
2554 offset = ((int) fn) - (buildaddr + 5);
2555 memcpy (buf + 1, &offset, 4);
2556 append_insns (&buildaddr, 5, buf);
2557 current_insn_ptr = buildaddr;
2558}
2559
/* Emit code to fetch raw register REG: push the raw-register base
   (from 8(%ebp)) and REG as arguments, call the get_raw_reg helper,
   and leave its result in %eax with %ebx cleared.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2585
/* Emit a pop: reload the cached top %ebx:%eax from the memory stack,
   discarding the previous cached value.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2593
/* Emit code to push the cached top %ebx:%eax onto the memory stack
   (high half first, so %eax ends up at the lower address).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2601
/* Emit code to zero-extend the low ARG bits of the cached top,
   clearing everything above them including %ebx.  Only 8, 16 and 32
   are supported; anything else sets emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2625
/* Emit code to exchange the cached top %ebx:%eax with the 64-bit
   entry on the memory stack.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2637
2638static void
2639i386_emit_stack_adjust (int n)
2640{
2641 unsigned char buf[16];
2642 int i;
2643 CORE_ADDR buildaddr = current_insn_ptr;
2644
2645 i = 0;
2646 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2647 buf[i++] = 0x64;
2648 buf[i++] = 0x24;
2649 buf[i++] = n * 8;
2650 append_insns (&buildaddr, i, buf);
2651 current_insn_ptr = buildaddr;
2652}
2653
/* FN's prototype is `LONGEST(*fn)(int)'.  */

/* Emit a call to FN passing the literal ARG1 on the stack; FN's
   64-bit return value (%edx:%eax) becomes the new cached top
   %ebx:%eax.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2681
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

/* Emit a call to FN passing the literal ARG1 and the current 64-bit
   top of stack as arguments, preserving the cached top across the
   call.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
2717
6b9801d4 2718
/* Emit a 64-bit compare-and-branch for "equal": compare the cached
   top %ebx:%eax with the memory-stack entry, pop both on either path,
   and take the (later-patched) 0xe9 branch when they are equal.  The
   displacement position is reported via *OFFSET_P/*SIZE_P.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;	/* Byte offset of the 0xe9 displacement; must
			   match the instruction encodings above.  */
  if (size_p)
    *size_p = 4;
}
2743
/* Emit a 64-bit compare-and-branch for "not equal": branch when the
   memory-stack entry differs from %ebx:%eax, popping both operands.
   The 0xe9 displacement position is reported via *OFFSET_P/*SIZE_P.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;	/* Must match the byte sequence above.  */
  if (size_p)
    *size_p = 4;
}
2769
/* Emit a 64-bit signed "less than" compare-and-branch: branch when the
   memory-stack entry is signed-less-than %ebx:%eax (high halves
   compared first), popping both operands.  The 0xe9 displacement
   position is reported via *OFFSET_P/*SIZE_P.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Must match the byte sequence above.  */
  if (size_p)
    *size_p = 4;
}
2795
/* Emit a 64-bit signed "less than or equal" compare-and-branch:
   branch when the memory-stack entry is <= %ebx:%eax (signed),
   popping both operands.  Displacement reported via
   *OFFSET_P/*SIZE_P.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Must match the byte sequence above.  */
  if (size_p)
    *size_p = 4;
}
2821
/* Emit a 64-bit signed "greater than" compare-and-branch: branch when
   the memory-stack entry is > %ebx:%eax (signed), popping both
   operands.  Displacement reported via *OFFSET_P/*SIZE_P.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Must match the byte sequence above.  */
  if (size_p)
    *size_p = 4;
}
2847
/* Emit a 64-bit signed "greater than or equal" compare-and-branch:
   branch when the memory-stack entry is >= %ebx:%eax (signed),
   popping both operands.  Displacement reported via
   *OFFSET_P/*SIZE_P.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;	/* Must match the byte sequence above.  */
  if (size_p)
    *size_p = 4;
}
2873
/* Emit-callback table used when compiling agent-expression bytecode to
   native code for a 32-bit inferior.  Entries are positional and must
   stay in the field order of struct emit_ops.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2914
2915
2916static struct emit_ops *
2917x86_emit_ops (void)
2918{
2919#ifdef __x86_64__
3aee8918 2920 if (is_64bit_tdesc ())
6a271cae
PA
2921 return &amd64_emit_ops;
2922 else
2923#endif
2924 return &i386_emit_ops;
2925}
2926
/* Implementation of target ops method "sw_breakpoint_from_kind".
   KIND is ignored: a single software breakpoint sequence is used for
   all of x86.  Return it and report its length through *SIZE.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2935
/* Implementation of linux_target_ops method "supports_range_stepping".
   Range stepping is always available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2941
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   Always true on x86.  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2950
/* Return the index identifying the current thread's target description
   to the in-process agent (IPA).  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  /* NOTE(review): on 64-bit builds this returns unconditionally, so
     amd64_get_ipa_tdesc_idx is presumably expected to handle every
     description seen here; the i386 code below only runs on 32-bit
     builds.  */
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2966
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  These
   are the remaining function-pointer hooks of linux_target_ops, in
   positional order.  */

struct linux_target_ops the_low_target =
{
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2981
/* The linux target ops object.  Generic gdbserver code reaches the
   x86-specific target through this pointer.  */

linux_process_target *the_linux_target = &the_x86_target;
2985
/* One-time arch-specific initialization: build the fallback "no XML"
   target descriptions (the naming suggests they are used when XML
   target descriptions are unavailable — confirm against callers) and
   register the x86 regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.056532 seconds and 4 git commands to generate.