gdbserver/linux-low: turn 'supports_software_single_step' and 'get_next_pcs' into...
[deliverable/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
b811d2c2 3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
5826e159 28#include "nat/gdb_ptrace.h"
d0722149 29
93813b37
WT
30#ifdef __x86_64__
31#include "nat/amd64-linux-siginfo.h"
32#endif
33
d0722149 34#include "gdb_proc_service.h"
b5737fa9
PA
35/* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37#ifndef ELFMAG0
38#include "elf/common.h"
39#endif
40
268a13a5 41#include "gdbsupport/agent.h"
3aee8918 42#include "tdesc.h"
c144c7a0 43#include "tracepoint.h"
f699aaba 44#include "ax.h"
7b669087 45#include "nat/linux-nat.h"
4b134ca1 46#include "nat/x86-linux.h"
8e5d4070 47#include "nat/x86-linux-dregs.h"
ae91f625 48#include "linux-x86-tdesc.h"
a196ebeb 49
3aee8918
PA
/* Fallback target descriptions, used when the connected GDB does not
   support XML target descriptions (see use_xml below); filled in
   lazily by initialization code outside this view.  */
#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


/* Jump instruction templates used by the fast tracepoint jump pad:
   a 5-byte near jump (0xe9 rel32) and a 4-byte short form (0x66 0xe9
   rel16).  The displacement bytes are patched in later.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
d0722149
DE
72
73#include <sys/reg.h>
74#include <sys/procfs.h>
1570b33e
L
75#include <sys/uio.h>
76
d0722149
DE
/* Fallback for kernels/headers that don't define the request.  */
#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
94
ef0478f6
TBA
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();

  /* Return the regs_info matching the current tdesc (amd64 or i386).  */
  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

protected:

  /* Install the tdesc read from the running inferior.  */
  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
129
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Software mirror of this process's hardware debug registers,
     shared with the native x86 debug-register code.  */
  struct x86_debug_reg_state debug_reg_state;
};
136
d0722149
DE
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

/* Offset (in bytes) of each GDB register within `struct user'; -1
   marks registers not transferred through this map (they go through
   regsets instead).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1					/* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
3aee8918
PA
209
210#ifdef __x86_64__
211
212/* Returns true if the current inferior belongs to a x86-64 process,
213 per the tdesc. */
214
215static int
216is_64bit_tdesc (void)
217{
0bfdf32f 218 struct regcache *regcache = get_thread_regcache (current_thread, 0);
3aee8918
PA
219
220 return register_size (regcache->tdesc, 0) == 8;
221}
222
223#endif
224
d0722149
DE
225\f
/* Called by libthread_db.  Store in *BASE the thread-local storage
   base for thread LWPID.  On 64-bit inferiors IDX is the FS/GS
   register number and the base is fetched via ARCH_PRCTL; otherwise
   IDX is a GDT descriptor index for PTRACE_GET_THREAD_AREA.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
266
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      /* 64-bit threads keep their thread pointer in FS.  */
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* The upper bits of the GS selector are the GDT index of the
       thread-area descriptor.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
313
314
d0722149 315\f
daca57a7
TBA
316bool
317x86_target::low_cannot_store_register (int regno)
d0722149 318{
3aee8918
PA
319#ifdef __x86_64__
320 if (is_64bit_tdesc ())
daca57a7 321 return false;
3aee8918
PA
322#endif
323
d0722149
DE
324 return regno >= I386_NUM_REGS;
325}
326
daca57a7
TBA
327bool
328x86_target::low_cannot_fetch_register (int regno)
d0722149 329{
3aee8918
PA
330#ifdef __x86_64__
331 if (is_64bit_tdesc ())
daca57a7 332 return false;
3aee8918
PA
333#endif
334
d0722149
DE
335 return regno >= I386_NUM_REGS;
336}
337
/* Collect the general-purpose registers from REGCACHE into BUF, laid
   out as ptrace's `struct user' expects.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are absent from `struct user' on this host;
	   push them to the inferior via ARCH_PRCTL instead.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}
392
/* Supply the general-purpose registers in BUF (ptrace `struct user'
   layout) to REGCACHE.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	/* fs_base/gs_base are absent from `struct user' on this host;
	   read them from the inferior via ARCH_PRCTL instead.  */
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
427
/* Collect the FP registers from REGCACHE into BUF, in the native
   ptrace layout (fxsave on amd64, fsave on i386).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
437
/* Supply the FP registers in BUF (fxsave on amd64, fsave on i386) to
   REGCACHE.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
447
#ifndef __x86_64__

/* Collect the extended FP/SSE registers (fxsave layout) from REGCACHE
   into BUF.  Only needed on 32-bit hosts, via PTRACE_GETFPXREGS.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Supply the extended FP/SSE registers in BUF (fxsave layout) to
   REGCACHE.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
463
1570b33e
L
/* Collect the XSAVE extended state from REGCACHE into BUF.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Supply the XSAVE extended state in BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
475
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* The size 0 here is patched to X86_XSTATE_SIZE (xcr0) once the
     XSAVE mask is known; see x86_linux_read_description.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
504
bf9ae9d8
TBA
bool
x86_target::low_supports_breakpoints ()
{
  /* Software breakpoints (int3) are always available on x86.  */
  return true;
}
510
511CORE_ADDR
512x86_target::low_get_pc (regcache *regcache)
d0722149 513{
3aee8918 514 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
515
516 if (use_64bit)
517 {
6598661d
PA
518 uint64_t pc;
519
442ea881 520 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
521 return (CORE_ADDR) pc;
522 }
523 else
524 {
6598661d
PA
525 uint32_t pc;
526
442ea881 527 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
528 return (CORE_ADDR) pc;
529 }
530}
531
bf9ae9d8
TBA
532void
533x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
d0722149 534{
3aee8918 535 int use_64bit = register_size (regcache->tdesc, 0) == 8;
d0722149
DE
536
537 if (use_64bit)
538 {
6598661d
PA
539 uint64_t newpc = pc;
540
442ea881 541 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
542 }
543 else
544 {
6598661d
PA
545 uint32_t newpc = pc;
546
442ea881 547 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
548 }
549}
550\f
dd373349 551static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
552#define x86_breakpoint_len 1
553
554static int
555x86_breakpoint_at (CORE_ADDR pc)
556{
557 unsigned char c;
558
52405d85 559 the_target->read_memory (pc, &c, 1);
d0722149
DE
560 if (c == 0xCC)
561 return 1;
562
563 return 0;
564}
565\f
42995dbd 566/* Low-level function vector. */
df7e5265 567struct x86_dr_low_type x86_dr_low =
42995dbd 568 {
d33472ad
GB
569 x86_linux_dr_set_control,
570 x86_linux_dr_set_addr,
571 x86_linux_dr_get_addr,
572 x86_linux_dr_get_status,
573 x86_linux_dr_get_control,
42995dbd
GB
574 sizeof (void *),
575 };
aa5ca48f 576\f
90d74c30 577/* Breakpoint/Watchpoint support. */
aa5ca48f
DE
578
579static int
802e8e6d
PA
580x86_supports_z_point_type (char z_type)
581{
582 switch (z_type)
583 {
584 case Z_PACKET_SW_BP:
585 case Z_PACKET_HW_BP:
586 case Z_PACKET_WRITE_WP:
587 case Z_PACKET_ACCESS_WP:
588 return 1;
589 default:
590 return 0;
591 }
592}
593
/* Insert a break/watchpoint of TYPE at ADDR covering SIZE bytes.
   Hardware breakpoints and watchpoints go through the shared x86
   debug-register code; anything else is reported unsupported (1).  */

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
619
/* Remove a break/watchpoint previously inserted by x86_insert_point.
   Returns 0 on success, 1 for unsupported types.  */

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
644
645static int
646x86_stopped_by_watchpoint (void)
647{
648 struct process_info *proc = current_process ();
fe978cb0 649 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
aa5ca48f
DE
650}
651
652static CORE_ADDR
653x86_stopped_data_address (void)
654{
655 struct process_info *proc = current_process ();
656 CORE_ADDR addr;
fe978cb0 657 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
df7e5265 658 &addr))
aa5ca48f
DE
659 return addr;
660 return 0;
661}
662\f
/* Called when a new process is created.  Allocate the per-process
   arch data with a cleared debug-register mirror.  Ownership passes
   to the caller (freed by x86_linux_delete_process).  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
674
04ec7890
SM
/* Called when a process is being deleted.  Frees the arch data
   allocated by x86_linux_new_process.  */

static void
x86_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
682
3a8a0396
DB
/* Target routine for linux_new_fork.  Copies the parent's
   debug-register mirror into the child.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
710
70a0bb6b
GB
711/* See nat/x86-dregs.h. */
712
713struct x86_debug_reg_state *
714x86_debug_reg_state (pid_t pid)
715{
716 struct process_info *proc = find_process_pid (pid);
717
718 return &proc->priv->arch_private->debug_reg_state;
719}
aa5ca48f 720\f
d0722149
DE
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
753\f
1570b33e
L
754static int use_xml;
755
3aee8918
PA
756/* Format of XSAVE extended state is:
757 struct
758 {
759 fxsave_bytes[0..463]
760 sw_usable_bytes[464..511]
761 xstate_hdr_bytes[512..575]
762 avx_bytes[576..831]
763 future_state etc
764 };
765
766 Same memory layout will be used for the coredump NT_X86_XSTATE
767 representing the XSAVE extended state registers.
768
769 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
770 extended state mask, which is the same as the extended control register
771 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
772 together with the mask saved in the xstate_hdr_bytes to determine what
773 states the processor/OS supports and what state, used or initialized,
774 the process/thread is in. */
775#define I386_LINUX_XSAVE_XCR0_OFFSET 464
776
777/* Does the current host support the GETFPXREGS request? The header
778 file may or may not define it, and even if it is defined, the
779 kernel will return EIO if it's running on a pre-SSE processor. */
780int have_ptrace_getfpxregs =
781#ifdef HAVE_PTRACE_GETFPXREGS
782 -1
783#else
784 0
785#endif
786;
1570b33e 787
3aee8918
PA
/* Get Linux/x86 target description from running target.  Probes the
   inferior's word size, the availability of PTRACE_GETFPXREGS and
   PTRACE_GETREGSET, and the XCR0 feature mask, then returns the
   matching tdesc.  Also patches the xstate regset size as a side
   effect.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      /* One-time probe: pre-SSE kernels/CPUs fail GETFPXREGS with
	 EIO, in which case fall back to a plain x87 tdesc.  */
      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to a plain SSE tdesc when XCR0 is unavailable or
	 yields no description.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
911
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  /* Restore the thread selected before the walk.  */
  current_thread = saved_thread;
}
936
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  /* Duplicate so strtok_r can modify the string in place.  */
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  /* Re-read tdescs for all processes under the new use_xml setting.  */
  the_x86_target.update_xmltarget ();
}
974
3aee8918 975/* Common for x86/x86-64. */
d0722149 976
3aee8918
PA
977static struct regsets_info x86_regsets_info =
978 {
979 x86_regsets, /* regsets */
980 0, /* num_regsets */
981 NULL, /* disabled_regsets */
982 };
214d508e
L
983
984#ifdef __x86_64__
3aee8918
PA
985static struct regs_info amd64_linux_regs_info =
986 {
987 NULL, /* regset_bitmap */
988 NULL, /* usrregs_info */
989 &x86_regsets_info
990 };
d0722149 991#endif
3aee8918
PA
992static struct usrregs_info i386_linux_usrregs_info =
993 {
994 I386_NUM_REGS,
995 i386_regmap,
996 };
d0722149 997
3aee8918
PA
998static struct regs_info i386_linux_regs_info =
999 {
1000 NULL, /* regset_bitmap */
1001 &i386_linux_usrregs_info,
1002 &x86_regsets_info
1003 };
d0722149 1004
aa8d21c9
TBA
1005const regs_info *
1006x86_target::get_regs_info ()
3aee8918
PA
1007{
1008#ifdef __x86_64__
1009 if (is_64bit_tdesc ())
1010 return &amd64_linux_regs_info;
1011 else
1012#endif
1013 return &i386_linux_regs_info;
1014}
d0722149 1015
3aee8918
PA
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1024
82075af2
JS
1025/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
1026 code. This should only be called if LWP got a SYSCALL_SIGTRAP. */
1027
1028static void
4cc32bec 1029x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
82075af2
JS
1030{
1031 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1032
1033 if (use_64bit)
1034 {
1035 long l_sysno;
82075af2
JS
1036
1037 collect_register_by_name (regcache, "orig_rax", &l_sysno);
82075af2 1038 *sysno = (int) l_sysno;
82075af2
JS
1039 }
1040 else
4cc32bec 1041 collect_register_by_name (regcache, "orig_eax", sysno);
82075af2
JS
1042}
1043
219f2f23
PA
1044static int
1045x86_supports_tracepoints (void)
1046{
1047 return 1;
1048}
1049
fa593d66
PA
/* Write LEN bytes from BUF into the inferior at *TO, then advance *TO
   past the written bytes.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1056
/* Parse OP as a whitespace-separated sequence of hex byte values,
   append each byte to BUF, and return the number of bytes written.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *out = buf;

  for (;;)
    {
      char *end;
      unsigned long byte = strtoul (op, &end, 16);

      /* Stop at the first position where no hex digits were read.  */
      if (end == op)
	break;

      *out++ = byte;
      op = end;
    }

  return out - buf;
}
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success; returns 1 and leaves an
   "E."-prefixed message in ERR if a required jump displacement does
   not fit in 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* Push the tracepoint address, so the collector sees the $pc of the
     tracepoint on top of the saved registers.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success; returns 1 with an
   "E."-prefixed message in ERR if trampoline space cannot be
   claimed.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* The collector is reachable with a plain 32-bit relative call
     here.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state, mirroring the save sequence above.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Install a fast tracepoint jump pad, dispatching to the amd64 or
   i386 builder according to the bitness of the inferior's target
   description.  Returns 0 on success, nonzero with an error string in
   ERR on failure (see the builders above).  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1493
1494/* Return the minimum instruction length for fast tracepoints on x86/x86-64
1495 architectures. */
1496
1497static int
1498x86_get_min_fast_tracepoint_insn_len (void)
1499{
1500 static int warned_about_fast_tracepoints = 0;
1501
1502#ifdef __x86_64__
1503 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1504 used for fast tracepoints. */
3aee8918 1505 if (is_64bit_tdesc ())
405f8e94
SS
1506 return 5;
1507#endif
1508
58b4daa5 1509 if (agent_loaded_p ())
405f8e94
SS
1510 {
1511 char errbuf[IPA_BUFSIZ];
1512
1513 errbuf[0] = '\0';
1514
1515 /* On x86, if trampolines are available, then 4-byte jump instructions
1516 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1517 with a 4-byte offset are used instead. */
1518 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1519 return 4;
1520 else
1521 {
1522 /* GDB has no channel to explain to user why a shorter fast
1523 tracepoint is not possible, but at least make GDBserver
1524 mention that something has gone awry. */
1525 if (!warned_about_fast_tracepoints)
1526 {
422186a9 1527 warning ("4-byte fast tracepoints not available; %s", errbuf);
405f8e94
SS
1528 warned_about_fast_tracepoints = 1;
1529 }
1530 return 5;
1531 }
1532 }
1533 else
1534 {
1535 /* Indicate that the minimum length is currently unknown since the IPA
1536 has not loaded yet. */
1537 return 0;
1538 }
fa593d66
PA
1539}

/* Append the LEN bytes at START to the bytecode currently being
   compiled (at current_insn_ptr), advancing current_insn_ptr past
   them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* EMIT_ASM assembles INSNS in place, bracketed by the labels
   start_NAME and end_NAME, and copies the resulting bytes into the
   bytecode being compiled via add_insns.  The leading "jmp end_NAME"
   keeps the embedded fragment from ever executing in gdbserver
   itself.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* As EMIT_ASM, but assembles INSNS as 32-bit code (.code32) so a
   64-bit gdbserver can emit bytecode for 32-bit inferiors.  */

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

/* In a 32-bit build, all emitted code is 32-bit already.  */

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

/* Agent-expression emitters for 64-bit inferiors.  The compiled code
   keeps the expression's top-of-stack value cached in %rax, with
   deeper entries on the machine stack.  */

/* Emit the prologue: set up a frame and spill the two incoming
   arguments (raw-regs base in %rdi, result-value pointer in %rsi) so
   they can be reloaded later.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

/* Emit the epilogue: store top-of-stack (%rax) through the saved
   value pointer and return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

/* Emit: pop the next stack entry and add it into top-of-stack.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: subtract top-of-stack from the next entry; result becomes the
   new top-of-stack.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* Multiply is not implemented; record a compilation error.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

/* Left shift is not implemented; record a compilation error.  */

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

/* Arithmetic right shift is not implemented; record a compilation
   error.  */

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

/* Logical right shift is not implemented; record a compilation
   error.  */

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

/* Emit sign-extension of the low ARG (8/16/32) bits of top-of-stack
   to the full 64 bits.  Other widths are a compilation error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit logical NOT: top-of-stack becomes 1 if it was zero, else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

/* Emit: pop the next stack entry and AND it into top-of-stack.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack entry and OR it into top-of-stack.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit: pop the next stack entry and XOR it into top-of-stack.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise NOT of top-of-stack.  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Emit: compare the next stack entry with top-of-stack; top-of-stack
   becomes 1 if equal, 0 otherwise, and the entry is popped.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed less-than: (next entry < top-of-stack), popping the
   entry; result in top-of-stack.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit unsigned less-than, as above but using an unsigned compare.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte memory load through the address in top-of-stack,
   replacing top-of-stack with the loaded value.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Emit a conditional branch taken when the popped top-of-stack is
   nonzero.  The 4-byte displacement of the raw jne (0x0f 0x85) is
   left zero; *OFFSET_P/*SIZE_P tell the caller where to patch it
   (byte offset 10, 4 bytes).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp (0xe9) with a zero displacement to be
   patched later at byte offset 1, 4 bytes wide.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch a previously emitted jump at FROM (displacement field SIZE
   bytes wide) to target TO.  Only 4-byte displacements are
   supported; anything else records a compilation error.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

/* Emit a 64-bit immediate load of NUM into top-of-stack
   (movabs $NUM,%rax).  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN.  Uses a 5-byte relative call when FN is within
   a 32-bit displacement, otherwise an indirect call through %r10.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit code to fetch raw register REG: load the register number into
   %esi and call the IPA's get_raw_reg function.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

/* Emit: discard top-of-stack and pop the next entry into it.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit: push the cached top-of-stack (%rax) onto the machine
   stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Emit zero-extension of the low ARG (8/16/32) bits of
   top-of-stack.  Other widths are a compilation error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit: swap top-of-stack with the next stack entry.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Emit code to drop N 8-byte entries from the machine stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  Emit: %rax = FN (ARG1).  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit:
   FN (ARG1, top-of-stack), preserving top-of-stack across the
   call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

/* The six compare-and-branch emitters below all share one shape:
   compare the next stack entry against top-of-stack, pop both, and
   branch through a raw jmp (0xe9) whose 4-byte displacement the
   caller patches at byte offset 13 (hence *OFFSET_P = 13,
   *SIZE_P = 4).  */

/* Branch if the two top stack entries are equal.  */

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if the two top stack entries are not equal.  */

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next entry < top-of-stack (signed).  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next entry <= top-of-stack (signed).  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next entry > top-of-stack (signed).  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

/* Branch if next entry >= top-of-stack (signed).  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2134
6a271cae
PA
2135struct emit_ops amd64_emit_ops =
2136 {
2137 amd64_emit_prologue,
2138 amd64_emit_epilogue,
2139 amd64_emit_add,
2140 amd64_emit_sub,
2141 amd64_emit_mul,
2142 amd64_emit_lsh,
2143 amd64_emit_rsh_signed,
2144 amd64_emit_rsh_unsigned,
2145 amd64_emit_ext,
2146 amd64_emit_log_not,
2147 amd64_emit_bit_and,
2148 amd64_emit_bit_or,
2149 amd64_emit_bit_xor,
2150 amd64_emit_bit_not,
2151 amd64_emit_equal,
2152 amd64_emit_less_signed,
2153 amd64_emit_less_unsigned,
2154 amd64_emit_ref,
2155 amd64_emit_if_goto,
2156 amd64_emit_goto,
2157 amd64_write_goto_address,
2158 amd64_emit_const,
2159 amd64_emit_call,
2160 amd64_emit_reg,
2161 amd64_emit_pop,
2162 amd64_emit_stack_flush,
2163 amd64_emit_zero_ext,
2164 amd64_emit_swap,
2165 amd64_emit_stack_adjust,
2166 amd64_emit_int_call_1,
6b9801d4
SS
2167 amd64_emit_void_call_2,
2168 amd64_emit_eq_goto,
2169 amd64_emit_ne_goto,
2170 amd64_emit_lt_goto,
2171 amd64_emit_le_goto,
2172 amd64_emit_gt_goto,
2173 amd64_emit_ge_goto
6a271cae
PA
2174 };
2175
2176#endif /* __x86_64__ */
2177
2178static void
2179i386_emit_prologue (void)
2180{
2181 EMIT_ASM32 (i386_prologue,
2182 "push %ebp\n\t"
bf15cbda
SS
2183 "mov %esp,%ebp\n\t"
2184 "push %ebx");
6a271cae
PA
2185 /* At this point, the raw regs base address is at 8(%ebp), and the
2186 value pointer is at 12(%ebp). */
2187}
2188
2189static void
2190i386_emit_epilogue (void)
2191{
2192 EMIT_ASM32 (i386_epilogue,
2193 "mov 12(%ebp),%ecx\n\t"
2194 "mov %eax,(%ecx)\n\t"
2195 "mov %ebx,0x4(%ecx)\n\t"
2196 "xor %eax,%eax\n\t"
bf15cbda 2197 "pop %ebx\n\t"
6a271cae
PA
2198 "pop %ebp\n\t"
2199 "ret");
2200}
2201
/* Emit a 64-bit add: top of stack (cached in %eax/%ebx) += the next
   stack entry, which is popped off the CPU stack.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2210
/* Emit a 64-bit subtract: compute (next stack entry) - (top of
   stack), leaving the result in %eax/%ebx and popping the entry.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2220
/* 64-bit multiply is not implemented for i386; flag the compilation
   as failed via EMIT_ERROR.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2226
/* 64-bit left shift is not implemented for i386; flag the
   compilation as failed.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2232
/* 64-bit arithmetic right shift is not implemented for i386; flag
   the compilation as failed.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2238
/* 64-bit logical right shift is not implemented for i386; flag the
   compilation as failed.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2244
/* Emit a sign-extension of the top of stack from ARG bits to the
   full 64 bits held in %eax/%ebx.  Unsupported widths flag the
   compilation as failed.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2272
/* Emit logical NOT: the top of stack becomes 1 if its 64-bit value
   was zero, 0 otherwise.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2283
/* Emit 64-bit bitwise AND of the top of stack with the next stack
   entry, popping the entry.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2292
/* Emit 64-bit bitwise OR of the top of stack with the next stack
   entry, popping the entry.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2301
/* Emit 64-bit bitwise XOR of the top of stack with the next stack
   entry, popping the entry.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2310
/* Emit 64-bit bitwise complement of the top of stack (both halves
   of the %eax/%ebx pair).  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2318
/* Emit 64-bit equality test: top of stack becomes 1 if the next
   stack entry equals the old top, else 0; the entry is popped.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2336
/* Emit signed 64-bit less-than: top of stack becomes 1 if the next
   stack entry is less than the old top (high halves compared first),
   else 0; the entry is popped.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2355
/* Emit unsigned 64-bit less-than: top of stack becomes 1 if the
   next stack entry is below the old top, else 0; the entry is
   popped.  Note the low-half comparison uses "jb" (unsigned),
   unlike the signed variant's "jl".  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2374
/* Emit a memory dereference: replace the top of stack (an address
   in %eax) with the SIZE-byte value it points to.  For sizes below
   8 only the low bits of %eax are overwritten; the high half is
   left as-is (callers emit a zero/sign extension afterwards).
   Unknown sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2399
/* Emit a conditional branch: pop the top of stack and branch if it
   was non-zero.  The jump displacement is emitted as zero; *OFFSET_P
   and *SIZE_P report where the 4-byte displacement lives inside the
   emitted sequence so i386_write_goto_address can patch it later.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2417
/* Emit an unconditional jump with a zero displacement; *OFFSET_P and
   *SIZE_P locate the 4-byte displacement (right after the 0xe9
   opcode) for later patching.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2429
2430static void
2431i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2432{
2433 int diff = (to - (from + size));
2434 unsigned char buf[sizeof (int)];
2435
2436 /* We're only doing 4-byte sizes at the moment. */
2437 if (size != 4)
2438 {
2439 emit_error = 1;
2440 return;
2441 }
2442
2443 memcpy (buf, &diff, sizeof (int));
4196ab2a 2444 target_write_memory (from, buf, sizeof (int));
6a271cae
PA
2445}
2446
2447static void
4e29fb54 2448i386_emit_const (LONGEST num)
6a271cae
PA
2449{
2450 unsigned char buf[16];
b00ad6ff 2451 int i, hi, lo;
6a271cae
PA
2452 CORE_ADDR buildaddr = current_insn_ptr;
2453
2454 i = 0;
2455 buf[i++] = 0xb8; /* mov $<n>,%eax */
b00ad6ff
NF
2456 lo = num & 0xffffffff;
2457 memcpy (&buf[i], &lo, sizeof (lo));
6a271cae
PA
2458 i += 4;
2459 hi = ((num >> 32) & 0xffffffff);
2460 if (hi)
2461 {
2462 buf[i++] = 0xbb; /* mov $<n>,%ebx */
b00ad6ff 2463 memcpy (&buf[i], &hi, sizeof (hi));
6a271cae
PA
2464 i += 4;
2465 }
2466 else
2467 {
2468 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2469 }
2470 append_insns (&buildaddr, i, buf);
2471 current_insn_ptr = buildaddr;
2472}
2473
2474static void
2475i386_emit_call (CORE_ADDR fn)
2476{
2477 unsigned char buf[16];
2478 int i, offset;
2479 CORE_ADDR buildaddr;
2480
2481 buildaddr = current_insn_ptr;
2482 i = 0;
2483 buf[i++] = 0xe8; /* call <reladdr> */
2484 offset = ((int) fn) - (buildaddr + 5);
2485 memcpy (buf + 1, &offset, 4);
2486 append_insns (&buildaddr, 5, buf);
2487 current_insn_ptr = buildaddr;
2488}
2489
/* Emit code pushing the current value of raw register number REG:
   reserve stack space, pass REG and the raw-regs base (from
   8(%ebp), set up by the prologue) to the get_raw_reg helper, and
   leave the 32-bit result zero-extended in %eax/%ebx.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2515
/* Emit a stack pop: discard the cached top of stack and load the
   next entry from the CPU stack into %eax/%ebx.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2523
/* Emit a stack flush: spill the cached top of stack (%eax/%ebx)
   onto the CPU stack, high half first.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2531
/* Emit a zero-extension of the top of stack from ARG bits to 64
   bits.  Unsupported widths flag the compilation as failed.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2555
/* Emit a swap of the top two stack entries: the cached top
   (%eax/%ebx) trades places with the entry on the CPU stack, via
   %ecx/%edx as scratch.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2567
2568static void
2569i386_emit_stack_adjust (int n)
2570{
2571 unsigned char buf[16];
2572 int i;
2573 CORE_ADDR buildaddr = current_insn_ptr;
2574
2575 i = 0;
2576 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2577 buf[i++] = 0x64;
2578 buf[i++] = 0x24;
2579 buf[i++] = n * 8;
2580 append_insns (&buildaddr, i, buf);
2581 current_insn_ptr = buildaddr;
2582}
2583
/* FN's prototype is `LONGEST(*fn)(int)'. */

/* Emit a call to FN passing ARG1 as its single int argument, then
   move FN's 64-bit return value (%eax/%edx per the i386 ABI) into
   the top-of-stack pair %eax/%ebx.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2611
/* FN's prototype is `void(*fn)(int,LONGEST)'. */

/* Emit a call to FN passing ARG1 and the current top of stack as
   arguments.  FN returns nothing; %eax is saved around the call so
   the cached top of stack survives.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}
2647
6b9801d4 2648
/* Emit a fused compare-and-branch: jump if the second stack entry
   equals the top of stack.  Both operands are consumed on either
   path.  *OFFSET_P/*SIZE_P locate the patchable 4-byte jump
   displacement (18 bytes in, right after the 0xe9 opcode).  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2673
/* Emit a fused compare-and-branch: jump if the second stack entry
   differs from the top of stack.  Both operands are consumed on
   either path; *OFFSET_P/*SIZE_P locate the patchable jump
   displacement.  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2699
/* Emit a fused compare-and-branch: jump if the second stack entry
   is (signed) less than the top of stack, comparing high halves
   first.  Both operands are consumed on either path;
   *OFFSET_P/*SIZE_P locate the patchable jump displacement.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2725
/* Emit a fused compare-and-branch: jump if the second stack entry
   is (signed) less than or equal to the top of stack.  Both
   operands are consumed on either path; *OFFSET_P/*SIZE_P locate
   the patchable jump displacement.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2751
/* Emit a fused compare-and-branch: jump if the second stack entry
   is (signed) greater than the top of stack.  Both operands are
   consumed on either path; *OFFSET_P/*SIZE_P locate the patchable
   jump displacement.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2777
/* Emit a fused compare-and-branch: jump if the second stack entry
   is (signed) greater than or equal to the top of stack.  Both
   operands are consumed on either path; *OFFSET_P/*SIZE_P locate
   the patchable jump displacement.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2803
/* Bytecode compilation hooks for 32-bit (i386) inferiors.  Same
   slot layout as amd64_emit_ops; see struct emit_ops for the
   contract of each entry.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2844
2845
2846static struct emit_ops *
2847x86_emit_ops (void)
2848{
2849#ifdef __x86_64__
3aee8918 2850 if (is_64bit_tdesc ())
6a271cae
PA
2851 return &amd64_emit_ops;
2852 else
2853#endif
2854 return &i386_emit_ops;
2855}
2856
/* Implementation of target ops method "sw_breakpoint_from_kind". */

/* KIND is ignored: x86 uses a single software breakpoint insn.
   Return it and report its length through *SIZE.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2865
/* Implementation of linux_target_ops method "supports_range_stepping".
   Range stepping is unconditionally supported on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
2871
/* Implementation of linux_target_ops method "supports_hardware_single_step".
   Hardware single-stepping is unconditionally available on x86.  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
2880
/* Return the index identifying the current thread's target
   description, for communicating it to the in-process agent.  On
   __x86_64__ builds the amd64 helper is always used (presumably it
   also covers 32-bit tdescs -- confirm against
   amd64_get_ipa_tdesc_idx); 32-bit builds special-case the no-XML
   fallback tdesc before delegating to the i386 helper.  */

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2896
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets. */

/* Positional initializer: entries must stay in the field order of
   struct linux_target_ops.  */

struct linux_target_ops the_low_target =
{
  /* NOTE(review): presumably decr_pc_after_break (int3 is one byte)
     -- confirm against the struct definition.  */
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_delete_process,
  x86_linux_new_thread,
  x86_linux_delete_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
3aee8918 2933
/* The linux target ops object.  Points at the x86-specific target
   instance defined in this file.  */

linux_process_target *the_linux_target = &the_x86_target;
2937
/* One-time arch initialization: build the fallback ("no XML")
   target descriptions used when the peer cannot fetch XML tdescs,
   and register the x86 regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  /* 64-bit fallback: copy the SSE amd64 description and attach the
     canned XML target string.  */
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  /* 32-bit fallback, built unconditionally (a 64-bit gdbserver can
     still debug 32-bit inferiors).  */
  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}
This page took 1.835147 seconds and 4 git commands to generate.